ia64/xen-unstable

xen/arch/ia64/xen/vcpu.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 7515dc56c124
children 33d0cf9474f9
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/xen.h>
11 #include <xen/mm.h>
12 #include <asm/ia64_int.h>
13 #include <asm/vcpu.h>
14 #include <asm/regionreg.h>
15 #include <asm/tlb.h>
16 #include <asm/processor.h>
17 #include <asm/delay.h>
18 #include <asm/vmx_vcpu.h>
19 #include <asm/vhpt.h>
20 #include <asm/tlbflush.h>
21 #include <asm/privop.h>
22 #include <xen/event.h>
23 #include <asm/vmx_phy_mode.h>
24 #include <asm/bundle.h>
25 #include <asm/privop_stat.h>
26 #include <asm/uaccess.h>
27 #include <asm/p2m_entry.h>
28 #include <asm/tlb_track.h>
30 /* FIXME: where should these declarations live? */
31 extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
32 struct pt_regs *regs);
33 extern void setreg(unsigned long regnum, unsigned long val, int nat,
34 struct pt_regs *regs);
35 extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
36 struct pt_regs *regs);
38 extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
39 struct pt_regs *regs);
41 typedef union {
42 struct ia64_psr ia64_psr;
43 unsigned long i64;
44 } PSR;
46 // this def for vcpu_regs won't work if kernel stack is present
47 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
49 #define IA64_PTA_SZ_BIT 2
50 #define IA64_PTA_VF_BIT 8
51 #define IA64_PTA_BASE_BIT 15
52 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
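/*
 * Added note (not in the original source): these constants mirror the
 * architected cr.pta layout as used below -- pta.size occupies bits 2..7,
 * pta.vf is bit 8 and pta.base starts at bit 15 -- so, for example,
 * IA64_PTA_SZ(15) encodes a 32KB (2^15 byte) short-format VHPT.
 */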
54 #define IA64_PSR_NON_VIRT_BITS \
55 (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | \
56 IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK | \
57 IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB | \
58 IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID | \
59 IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS | \
60 IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
62 unsigned long vcpu_verbose = 0;
64 /**************************************************************************
65 VCPU general register access routines
66 **************************************************************************/
67 #ifdef XEN
68 u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
69 {
70 REGS *regs = vcpu_regs(vcpu);
71 u64 val;
73 if (!reg)
74 return 0;
75 getreg(reg, &val, 0, regs); // FIXME: handle NATs later
76 return val;
77 }
79 IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
80 {
81 REGS *regs = vcpu_regs(vcpu);
82 int nat;
84 getreg(reg, val, &nat, regs); // FIXME: handle NATs later
85 if (nat)
86 return IA64_NAT_CONSUMPTION_VECTOR;
87 return 0;
88 }
90 // returns:
91 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
92 // IA64_NO_FAULT otherwise
93 IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
94 {
95 REGS *regs = vcpu_regs(vcpu);
96 long sof = (regs->cr_ifs) & 0x7f;
98 if (!reg)
99 return IA64_ILLOP_FAULT;
100 if (reg >= sof + 32)
101 return IA64_ILLOP_FAULT;
102 setreg(reg, value, nat, regs); // FIXME: handle NATs later
103 return IA64_NO_FAULT;
104 }
106 IA64FAULT
107 vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
108 {
109 REGS *regs = vcpu_regs(vcpu);
110 getfpreg(reg, val, regs); // FIXME: handle NATs later
111 return IA64_NO_FAULT;
112 }
114 IA64FAULT
115 vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
116 {
117 REGS *regs = vcpu_regs(vcpu);
118 if (reg > 1)
119 setfpreg(reg, val, regs); // FIXME: handle NATs later
120 return IA64_NO_FAULT;
121 }
123 #else
124 // returns:
125 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
126 // IA64_NO_FAULT otherwise
127 IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
128 {
129 REGS *regs = vcpu_regs(vcpu);
130 long sof = (regs->cr_ifs) & 0x7f;
132 if (!reg)
133 return IA64_ILLOP_FAULT;
134 if (reg >= sof + 32)
135 return IA64_ILLOP_FAULT;
136 setreg(reg, value, 0, regs); // FIXME: handle NATs later
137 return IA64_NO_FAULT;
138 }
140 #endif
142 void vcpu_init_regs(struct vcpu *v)
143 {
144 struct pt_regs *regs;
146 regs = vcpu_regs(v);
147 if (VMX_DOMAIN(v)) {
148 /* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
149 regs->cr_ipsr = IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT |
150 IA64_PSR_I | IA64_PSR_IC | IA64_PSR_SI |
151 IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_VM;
152 /* lazy fp */
153 FP_PSR(v) = IA64_PSR_DFH;
154 regs->cr_ipsr |= IA64_PSR_DFH;
155 } else {
156 regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
157 | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
158 regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
159 | IA64_PSR_RI | IA64_PSR_IS);
160 // domain runs at PL2
161 regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,IA64_PSR_CPL0_BIT);
162 // lazy fp
163 PSCB(v, hpsr_dfh) = 1;
164 PSCB(v, hpsr_mfh) = 0;
165 regs->cr_ipsr |= IA64_PSR_DFH;
166 }
167 regs->cr_ifs = 1UL << 63; /* or clear? */
168 regs->ar_fpsr = FPSR_DEFAULT;
170 if (VMX_DOMAIN(v)) {
171 vmx_init_all_rr(v);
172 /* Virtual processor context setup */
173 VCPU(v, vpsr) = IA64_PSR_BN;
174 VCPU(v, dcr) = 0;
175 } else {
176 init_all_rr(v);
177 regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
178 VCPU(v, banknum) = 1;
179 VCPU(v, metaphysical_mode) = 1;
180 VCPU(v, interrupt_mask_addr) =
181 (unsigned char *)v->domain->arch.shared_info_va +
182 INT_ENABLE_OFFSET(v);
183 VCPU(v, itv) = (1 << 16); /* timer vector masked */
185 v->vcpu_info->evtchn_upcall_pending = 0;
186 v->vcpu_info->evtchn_upcall_mask = -1;
187 }
189 /* pta.size must not be 0. The minimum is 15 (32k) */
190 VCPU(v, pta) = 15 << 2;
192 v->arch.domain_itm_last = -1L;
193 }
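/*
 * Example (added for clarity): with the macros above, the reset value
 * "VCPU(v, pta) = 15 << 2" is simply IA64_PTA_SZ(15), i.e. pta.size = 15
 * (the 32KB minimum mentioned in the comment) with pta.ve, pta.vf and
 * pta.base all zero.
 */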
195 /**************************************************************************
196 VCPU privileged application register access routines
197 **************************************************************************/
199 void vcpu_load_kernel_regs(VCPU * vcpu)
200 {
201 ia64_set_kr(0, VCPU(vcpu, krs[0]));
202 ia64_set_kr(1, VCPU(vcpu, krs[1]));
203 ia64_set_kr(2, VCPU(vcpu, krs[2]));
204 ia64_set_kr(3, VCPU(vcpu, krs[3]));
205 ia64_set_kr(4, VCPU(vcpu, krs[4]));
206 ia64_set_kr(5, VCPU(vcpu, krs[5]));
207 ia64_set_kr(6, VCPU(vcpu, krs[6]));
208 ia64_set_kr(7, VCPU(vcpu, krs[7]));
209 }
211 /* GCC 4.0.2 seems unable to suppress this call! */
212 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
214 IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
215 {
216 if (reg == 44)
217 return vcpu_set_itc(vcpu, val);
218 else if (reg == 27)
219 return IA64_ILLOP_FAULT;
220 else if (reg == 24)
221 printk("warning: setting ar.eflg is a no-op; no IA-32 "
222 "support\n");
223 else if (reg > 7)
224 return IA64_ILLOP_FAULT;
225 else {
226 PSCB(vcpu, krs[reg]) = val;
227 ia64_set_kr(reg, val);
228 }
229 return IA64_NO_FAULT;
230 }
232 IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
233 {
234 if (reg == 24)
235 printk("warning: getting ar.eflg is a no-op; no IA-32 "
236 "support\n");
237 else if (reg > 7)
238 return IA64_ILLOP_FAULT;
239 else
240 *val = PSCB(vcpu, krs[reg]);
241 return IA64_NO_FAULT;
242 }
244 /**************************************************************************
245 VCPU protection key emulation for PV
246 This first implementation reserves one pkr for the hypervisor key.
247 On setting psr.pk the hypervisor key is loaded into pkr[15], so the
248 hypervisor may run with psr.pk==1. The hypervisor's key is 0.
249 Furthermore the VCPU is flagged as using protection keys.
250 Currently the domU has to keep track of the keys it uses, because when
251 a pkr is set there is no check against the other pkrs to see whether
252 that key is already in use.
253 **************************************************************************/
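/*
 * Sketch of the intended flow (added comment, based on the PV paths below):
 *   guest executes "ssm psr.pk"
 *     -> vcpu_set_psr_sm() sees imm.pk
 *     -> vcpu_pkr_set_psr_handling() flags the VCPU via vcpu_pkr_use_set()
 *        and loads the hypervisor key XEN_IA64_PKR_VAL into pkr[XEN_IA64_NPKRS]
 *   on context_switch(), vcpu_pkr_load_regs() reloads the saved pkrs.
 */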
255 /* The function loads the protection key registers from the struct arch_vcpu
256 * into the processor pkr's! Called in context_switch().
257 * TODO: take care of the order of writing pkr's!
258 */
259 void vcpu_pkr_load_regs(VCPU * vcpu)
260 {
261 int i;
263 for (i = 0; i <= XEN_IA64_NPKRS; i++)
264 ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
265 }
267 /* The function activates the pkr handling. */
268 static void vcpu_pkr_set_psr_handling(VCPU * vcpu)
269 {
270 if (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE)
271 return;
273 vcpu_pkr_use_set(vcpu);
274 PSCBX(vcpu, pkrs[XEN_IA64_NPKRS]) = XEN_IA64_PKR_VAL;
276 /* Write the special key for the hypervisor into pkr[15]. */
277 ia64_set_pkr(XEN_IA64_NPKRS, XEN_IA64_PKR_VAL);
278 }
280 /**************************************************************************
281 VCPU processor status register access routines
282 **************************************************************************/
284 static void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
285 {
286 /* only do something if mode changes */
287 if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
288 PSCB(vcpu, metaphysical_mode) = newmode;
289 if (newmode)
290 set_metaphysical_rr0();
291 else
292 set_virtual_rr0();
293 }
294 }
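/*
 * Added note: "metaphysical mode" is the PV counterpart of running with
 * psr.dt/rt/it cleared, i.e. guest-physical addressing; switching it simply
 * swaps rr0 between the domain's metaphysical and virtual region registers.
 */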
296 IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
297 {
298 vcpu_set_metaphysical_mode(vcpu, TRUE);
299 return IA64_NO_FAULT;
300 }
302 IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
303 {
304 struct ia64_psr psr, imm, *ipsr;
305 REGS *regs = vcpu_regs(vcpu);
307 //PRIVOP_COUNT_ADDR(regs,_RSM);
308 // TODO: All of these bits need to be virtualized
309 // TODO: Only allowed for current vcpu
310 __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
311 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
312 imm = *(struct ia64_psr *)&imm24;
313 // interrupt flag
314 if (imm.i)
315 vcpu->vcpu_info->evtchn_upcall_mask = 1;
316 if (imm.ic)
317 PSCB(vcpu, interrupt_collection_enabled) = 0;
318 // interrupt collection flag
319 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
320 // just handle psr.up and psr.pp for now
321 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
322 IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
323 IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
324 return IA64_ILLOP_FAULT;
325 if (imm.dfh) {
326 ipsr->dfh = PSCB(vcpu, hpsr_dfh);
327 PSCB(vcpu, vpsr_dfh) = 0;
328 }
329 if (imm.dfl)
330 ipsr->dfl = 0;
331 if (imm.pp) {
332 // xenoprof:
333 // Don't change psr.pp and ipsr->pp
334 // They are manipulated by xenoprof
335 // psr.pp = 1;
336 // ipsr->pp = 1;
337 PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
338 }
339 if (imm.up) {
340 ipsr->up = 0;
341 psr.up = 0;
342 }
343 if (imm.sp) {
344 ipsr->sp = 0;
345 psr.sp = 0;
346 }
347 if (imm.be)
348 ipsr->be = 0;
349 if (imm.dt)
350 vcpu_set_metaphysical_mode(vcpu, TRUE);
351 if (imm.pk) {
352 ipsr->pk = 0;
353 vcpu_pkr_use_unset(vcpu);
354 }
355 __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
356 return IA64_NO_FAULT;
357 }
359 IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
360 {
361 vcpu_set_metaphysical_mode(vcpu, FALSE);
362 return IA64_NO_FAULT;
363 }
365 IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
366 {
367 vcpu->vcpu_info->evtchn_upcall_mask = 0;
368 PSCB(vcpu, interrupt_collection_enabled) = 1;
369 return IA64_NO_FAULT;
370 }
372 IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
373 {
374 struct ia64_psr psr, imm, *ipsr;
375 REGS *regs = vcpu_regs(vcpu);
376 u64 mask, enabling_interrupts = 0;
378 //PRIVOP_COUNT_ADDR(regs,_SSM);
379 // TODO: All of these bits need to be virtualized
380 __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
381 imm = *(struct ia64_psr *)&imm24;
382 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
383 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
384 mask =
385 IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
386 IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
387 IA64_PSR_PK;
388 if (imm24 & ~mask)
389 return IA64_ILLOP_FAULT;
390 if (imm.dfh) {
391 PSCB(vcpu, vpsr_dfh) = 1;
392 ipsr->dfh = 1;
393 }
394 if (imm.dfl)
395 ipsr->dfl = 1;
396 if (imm.pp) {
397 // xenoprof:
398 // Don't change psr.pp and ipsr->pp
399 // They are manipulated by xenoprof
400 // psr.pp = 1;
401 // ipsr->pp = 1;
402 PSCB(vcpu, vpsr_pp) = 1;
403 }
404 if (imm.sp) {
405 ipsr->sp = 1;
406 psr.sp = 1;
407 }
408 if (imm.i) {
409 if (vcpu->vcpu_info->evtchn_upcall_mask) {
410 //printk("vcpu_set_psr_sm: psr.ic 0->1\n");
411 enabling_interrupts = 1;
412 }
413 vcpu->vcpu_info->evtchn_upcall_mask = 0;
414 }
415 if (imm.ic)
416 PSCB(vcpu, interrupt_collection_enabled) = 1;
417 // TODO: do this faster
418 if (imm.mfl) {
419 ipsr->mfl = 1;
420 psr.mfl = 1;
421 }
422 if (imm.mfh) {
423 ipsr->mfh = 1;
424 psr.mfh = 1;
425 }
426 if (imm.ac) {
427 ipsr->ac = 1;
428 psr.ac = 1;
429 }
430 if (imm.up) {
431 ipsr->up = 1;
432 psr.up = 1;
433 }
434 if (imm.be)
435 ipsr->be = 1;
436 if (imm.dt)
437 vcpu_set_metaphysical_mode(vcpu, FALSE);
438 if (imm.pk) {
439 vcpu_pkr_set_psr_handling(vcpu);
440 ipsr->pk = 1;
441 }
442 __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
443 if (enabling_interrupts &&
444 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
445 PSCB(vcpu, pending_interruption) = 1;
446 return IA64_NO_FAULT;
447 }
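/*
 * Worked example (added for clarity): a paravirtualized guest issuing
 * "ssm psr.i" reaches this path with imm.i set; the hypervisor clears
 * evtchn_upcall_mask (the virtual psr.i) and, if an interrupt became
 * deliverable by doing so, records it in PSCB(vcpu, pending_interruption)
 * so it can be injected on the way back to the guest.
 */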
449 IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
450 {
451 struct ia64_psr newpsr, *ipsr;
452 REGS *regs = vcpu_regs(vcpu);
453 u64 enabling_interrupts = 0;
455 newpsr = *(struct ia64_psr *)&val;
456 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
457 // just handle psr.up and psr.pp for now
458 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
459 // return IA64_ILLOP_FAULT;
460 // however trying to set other bits can't be an error as it is in ssm
461 if (newpsr.dfh) {
462 ipsr->dfh = 1;
463 PSCB(vcpu, vpsr_dfh) = 1;
464 } else {
465 ipsr->dfh = PSCB(vcpu, hpsr_dfh);
466 PSCB(vcpu, vpsr_dfh) = 0;
467 }
468 if (newpsr.dfl)
469 ipsr->dfl = 1;
470 if (newpsr.pp) {
471 // xenoprof:
472 // Don't change ipsr->pp
473 // It is manipulated by xenoprof
474 // ipsr->pp = 1;
475 PSCB(vcpu, vpsr_pp) = 1;
476 } else {
477 // xenoprof:
478 // Don't change ipsr->pp
479 // It is manipulated by xenoprof
480 // ipsr->pp = 1;
481 PSCB(vcpu, vpsr_pp) = 0;
482 }
483 if (newpsr.up)
484 ipsr->up = 1;
485 if (newpsr.sp)
486 ipsr->sp = 1;
487 if (newpsr.i) {
488 if (vcpu->vcpu_info->evtchn_upcall_mask)
489 enabling_interrupts = 1;
490 vcpu->vcpu_info->evtchn_upcall_mask = 0;
491 }
492 if (newpsr.ic)
493 PSCB(vcpu, interrupt_collection_enabled) = 1;
494 if (newpsr.mfl)
495 ipsr->mfl = 1;
496 if (newpsr.mfh)
497 ipsr->mfh = 1;
498 if (newpsr.ac)
499 ipsr->ac = 1;
500 if (newpsr.up)
501 ipsr->up = 1;
502 if (newpsr.dt && newpsr.rt)
503 vcpu_set_metaphysical_mode(vcpu, FALSE);
504 else
505 vcpu_set_metaphysical_mode(vcpu, TRUE);
506 if (newpsr.be)
507 ipsr->be = 1;
508 if (newpsr.pk) {
509 vcpu_pkr_set_psr_handling(vcpu);
510 ipsr->pk = 1;
511 } else
512 vcpu_pkr_use_unset(vcpu);
513 if (enabling_interrupts &&
514 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
515 PSCB(vcpu, pending_interruption) = 1;
516 return IA64_NO_FAULT;
517 }
519 IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val)
520 {
521 IA64_PSR newpsr, vpsr;
522 REGS *regs = vcpu_regs(vcpu);
523 u64 enabling_interrupts = 0;
525 /* Copy non-virtualized bits. */
526 newpsr.val = val & IA64_PSR_NON_VIRT_BITS;
528 /* Bits forced to 1 (psr.si, psr.is and psr.mc are forced to 0) */
529 newpsr.val |= IA64_PSR_DI;
531 newpsr.val |= IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
532 IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_DI;
533 /*
534 * xenoprof:
535 * keep psr.pp unchanged for xenoprof.
536 */
537 if (regs->cr_ipsr & IA64_PSR_PP)
538 newpsr.val |= IA64_PSR_PP;
539 else
540 newpsr.val &= ~IA64_PSR_PP;
542 vpsr.val = val;
544 if (val & IA64_PSR_DFH) {
545 newpsr.dfh = 1;
546 PSCB(vcpu, vpsr_dfh) = 1;
547 } else {
548 newpsr.dfh = PSCB(vcpu, hpsr_dfh);
549 PSCB(vcpu, vpsr_dfh) = 0;
550 }
552 PSCB(vcpu, vpsr_pp) = vpsr.pp;
554 if (vpsr.i) {
555 if (vcpu->vcpu_info->evtchn_upcall_mask)
556 enabling_interrupts = 1;
558 vcpu->vcpu_info->evtchn_upcall_mask = 0;
560 if (enabling_interrupts &&
561 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
562 PSCB(vcpu, pending_interruption) = 1;
563 } else
564 vcpu->vcpu_info->evtchn_upcall_mask = 1;
566 PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
567 vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));
569 newpsr.cpl |= max_t(u64, vpsr.cpl, CONFIG_CPL0_EMUL);
571 if (PSCB(vcpu, banknum) != vpsr.bn) {
572 if (vpsr.bn)
573 vcpu_bsw1(vcpu);
574 else
575 vcpu_bsw0(vcpu);
576 }
577 if (vpsr.pk) {
578 vcpu_pkr_set_psr_handling(vcpu);
579 newpsr.pk = 1;
580 } else
581 vcpu_pkr_use_unset(vcpu);
583 regs->cr_ipsr = newpsr.val;
585 return IA64_NO_FAULT;
586 }
588 u64 vcpu_get_psr(VCPU * vcpu)
589 {
590 REGS *regs = vcpu_regs(vcpu);
591 PSR newpsr;
592 PSR ipsr;
594 ipsr.i64 = regs->cr_ipsr;
596 /* Copy non-virtualized bits. */
597 newpsr.i64 = ipsr.i64 & IA64_PSR_NON_VIRT_BITS;
599 /* Bits forced to 1 (psr.si and psr.is are forced to 0) */
600 newpsr.i64 |= IA64_PSR_DI;
602 /* System mask. */
603 newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
604 newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
606 if (!PSCB(vcpu, metaphysical_mode))
607 newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
609 newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
610 newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
612 /* Fool cpl. */
613 if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
614 newpsr.ia64_psr.cpl = 0;
615 else
616 newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;
618 newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
620 return newpsr.i64;
621 }
623 IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval)
624 {
625 u64 psr = vcpu_get_psr(vcpu);
626 *pval = psr & (MASK(0, 32) | MASK(35, 2));
627 return IA64_NO_FAULT;
628 }
630 BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
631 {
632 return !!PSCB(vcpu, interrupt_collection_enabled);
633 }
635 BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
636 {
637 return !vcpu->vcpu_info->evtchn_upcall_mask;
638 }
640 /**************************************************************************
641 VCPU control register access routines
642 **************************************************************************/
644 IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
645 {
646 *pval = PSCB(vcpu, dcr);
647 return IA64_NO_FAULT;
648 }
650 IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
651 {
652 if (VMX_DOMAIN(vcpu))
653 *pval = PSCB(vcpu, iva) & ~0x7fffL;
654 else
655 *pval = PSCBX(vcpu, iva) & ~0x7fffL;
657 return IA64_NO_FAULT;
658 }
660 IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
661 {
662 *pval = PSCB(vcpu, pta);
663 return IA64_NO_FAULT;
664 }
666 IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
667 {
668 //REGS *regs = vcpu_regs(vcpu);
669 //*pval = regs->cr_ipsr;
670 *pval = PSCB(vcpu, ipsr);
671 return IA64_NO_FAULT;
672 }
674 IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
675 {
676 *pval = PSCB(vcpu, isr);
677 return IA64_NO_FAULT;
678 }
680 IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
681 {
682 //REGS *regs = vcpu_regs(vcpu);
683 //*pval = regs->cr_iip;
684 *pval = PSCB(vcpu, iip);
685 return IA64_NO_FAULT;
686 }
688 IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
689 {
690 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
691 *pval = PSCB(vcpu, ifa);
692 return IA64_NO_FAULT;
693 }
695 unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)
696 {
697 ia64_rr rr;
699 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
700 return rr.ps;
701 }
703 unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr)
704 {
705 ia64_rr rr;
707 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
708 return rr.rid;
709 }
711 unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa)
712 {
713 ia64_rr rr;
715 rr.rrval = 0;
716 rr.ps = vcpu_get_rr_ps(vcpu, ifa);
717 rr.rid = vcpu_get_rr_rid(vcpu, ifa);
718 return rr.rrval;
719 }
721 IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval)
722 {
723 u64 val = PSCB(vcpu, itir);
724 *pval = val;
725 return IA64_NO_FAULT;
726 }
728 IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval)
729 {
730 u64 val = PSCB(vcpu, iipa);
731 // SP entry code does not save iipa yet nor does it get
732 // properly delivered in the pscb
733 // printk("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
734 *pval = val;
735 return IA64_NO_FAULT;
736 }
738 IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
739 {
740 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
741 //*pval = PSCB(vcpu,regs).cr_ifs;
742 *pval = PSCB(vcpu, ifs);
743 return IA64_NO_FAULT;
744 }
746 IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval)
747 {
748 u64 val = PSCB(vcpu, iim);
749 *pval = val;
750 return IA64_NO_FAULT;
751 }
753 IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval)
754 {
755 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
756 *pval = PSCB(vcpu, iha);
757 return IA64_NO_FAULT;
758 }
760 IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
761 {
762 PSCB(vcpu, dcr) = val;
763 return IA64_NO_FAULT;
764 }
766 IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val)
767 {
768 if (VMX_DOMAIN(vcpu))
769 PSCB(vcpu, iva) = val & ~0x7fffL;
770 else
771 PSCBX(vcpu, iva) = val & ~0x7fffL;
773 return IA64_NO_FAULT;
774 }
776 IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
777 {
778 if (val & (0x3f << 9)) /* reserved fields */
779 return IA64_RSVDREG_FAULT;
780 if (val & 2) /* reserved fields */
781 return IA64_RSVDREG_FAULT;
782 PSCB(vcpu, pta) = val;
783 return IA64_NO_FAULT;
784 }
786 IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val)
787 {
788 PSCB(vcpu, ipsr) = val;
789 return IA64_NO_FAULT;
790 }
792 IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val)
793 {
794 PSCB(vcpu, isr) = val;
795 return IA64_NO_FAULT;
796 }
798 IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val)
799 {
800 PSCB(vcpu, iip) = val;
801 return IA64_NO_FAULT;
802 }
804 IA64FAULT vcpu_increment_iip(VCPU * vcpu)
805 {
806 REGS *regs = vcpu_regs(vcpu);
807 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
808 if (ipsr->ri == 2) {
809 ipsr->ri = 0;
810 regs->cr_iip += 16;
811 } else
812 ipsr->ri++;
813 return IA64_NO_FAULT;
814 }
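/*
 * Added note: an IA-64 bundle is 16 bytes and holds three instruction
 * slots, so advancing the virtual ip means bumping ipsr.ri (0..2) and only
 * moving cr.iip to the next bundle when the slot index wraps from 2 to 0.
 */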
816 IA64FAULT vcpu_decrement_iip(VCPU * vcpu)
817 {
818 REGS *regs = vcpu_regs(vcpu);
819 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
821 if (ipsr->ri == 0) {
822 ipsr->ri = 2;
823 regs->cr_iip -= 16;
824 } else
825 ipsr->ri--;
827 return IA64_NO_FAULT;
828 }
830 IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
831 {
832 PSCB(vcpu, ifa) = val;
833 return IA64_NO_FAULT;
834 }
836 IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val)
837 {
838 PSCB(vcpu, itir) = val;
839 return IA64_NO_FAULT;
840 }
842 IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val)
843 {
844 // SP entry code does not save iipa yet nor does it get
845 // properly delivered in the pscb
846 // printk("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
847 PSCB(vcpu, iipa) = val;
848 return IA64_NO_FAULT;
849 }
851 IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val)
852 {
853 //REGS *regs = vcpu_regs(vcpu);
854 PSCB(vcpu, ifs) = val;
855 return IA64_NO_FAULT;
856 }
858 IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val)
859 {
860 PSCB(vcpu, iim) = val;
861 return IA64_NO_FAULT;
862 }
864 IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val)
865 {
866 PSCB(vcpu, iha) = val;
867 return IA64_NO_FAULT;
868 }
870 /**************************************************************************
871 VCPU interrupt control register access routines
872 **************************************************************************/
874 void vcpu_pend_unspecified_interrupt(VCPU * vcpu)
875 {
876 PSCB(vcpu, pending_interruption) = 1;
877 }
879 void vcpu_pend_interrupt(VCPU * vcpu, u64 vector)
880 {
881 if (vector & ~0xff) {
882 printk("vcpu_pend_interrupt: bad vector\n");
883 return;
884 }
886 if (vcpu->arch.event_callback_ip) {
887 printk("Deprecated interface. Move to new event based "
888 "solution\n");
889 return;
890 }
892 if (VMX_DOMAIN(vcpu)) {
893 set_bit(vector, VCPU(vcpu, irr));
894 } else {
895 set_bit(vector, PSCBX(vcpu, irr));
896 PSCB(vcpu, pending_interruption) = 1;
897 }
898 }
900 #define IA64_TPR_MMI 0x10000
901 #define IA64_TPR_MIC 0x000f0
903 /* checks to see if a VCPU has any unmasked pending interrupts
904 * if so, returns the highest, else returns SPURIOUS_VECTOR */
905 /* NOTE: Since this gets called from vcpu_get_ivr() and the
906 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
907 * this routine also ignores pscb.interrupt_delivery_enabled
908 * and this must be checked independently; see vcpu_deliverable_interrupts() */
909 u64 vcpu_check_pending_interrupts(VCPU * vcpu)
910 {
911 u64 *p, *r, bits, bitnum, mask, i, vector;
913 if (vcpu->arch.event_callback_ip)
914 return SPURIOUS_VECTOR;
916 /* Always check pending event, since guest may just ack the
917 * event injection without handling it. Later the guest may throw out
918 * the event itself.
919 */
920 check_start:
921 if (event_pending(vcpu) &&
922 !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
923 &PSCBX(vcpu, insvc[0])))
924 vcpu_pend_interrupt(vcpu,
925 vcpu->domain->shared_info->arch.
926 evtchn_vector);
928 p = &PSCBX(vcpu, irr[3]);
929 r = &PSCBX(vcpu, insvc[3]);
930 for (i = 3 ;; p--, r--, i--) {
931 bits = *p;
932 if (bits)
933 break; // got a potential interrupt
934 if (*r) {
935 // nothing in this word which is pending+inservice
936 // but there is one inservice which masks lower
937 return SPURIOUS_VECTOR;
938 }
939 if (i == 0) {
940 // checked all bits... nothing pending+inservice
941 return SPURIOUS_VECTOR;
942 }
943 }
944 // have a pending,deliverable interrupt... see if it is masked
945 bitnum = ia64_fls(bits);
946 //printk("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
947 vector = bitnum + (i * 64);
948 mask = 1L << bitnum;
949 /* sanity check for guest timer interrupt */
950 if (vector == (PSCB(vcpu, itv) & 0xff)) {
951 uint64_t now = ia64_get_itc();
952 if (now < PSCBX(vcpu, domain_itm)) {
953 // printk("Ooops, pending guest timer before its due\n");
954 PSCBX(vcpu, irr[i]) &= ~mask;
955 goto check_start;
956 }
957 }
958 //printk("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...\n",vector);
959 if (*r >= mask) {
960 // masked by equal inservice
961 //printk("but masked by equal inservice\n");
962 return SPURIOUS_VECTOR;
963 }
964 if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
965 // tpr.mmi is set
966 //printk("but masked by tpr.mmi\n");
967 return SPURIOUS_VECTOR;
968 }
969 if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
970 //tpr.mic masks class
971 //printk("but masked by tpr.mic\n");
972 return SPURIOUS_VECTOR;
973 }
974 //printk("returned to caller\n");
975 return vector;
976 }
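/*
 * Worked example of the tpr.mic test above (added comment): IA64_TPR_MIC
 * covers bits 4..7 of cr.tpr, so if the guest sets tpr.mic to 3 the masked
 * field is 0x30 and 0x30 + 15 = 0x3f; every vector up to 0x3f (interrupt
 * classes 0..3) is therefore reported as SPURIOUS_VECTOR here.
 */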
978 u64 vcpu_deliverable_interrupts(VCPU * vcpu)
979 {
980 return (vcpu_get_psr_i(vcpu) &&
981 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
982 }
984 u64 vcpu_deliverable_timer(VCPU * vcpu)
985 {
986 return (vcpu_get_psr_i(vcpu) &&
987 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
988 }
990 IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval)
991 {
992 /* Use EID=0, ID=vcpu_id. */
993 *pval = vcpu->vcpu_id << 24;
994 return IA64_NO_FAULT;
995 }
997 IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval)
998 {
999 int i;
1000 u64 vector, mask;
1002 #define HEARTBEAT_FREQ 16 // period in seconds
1003 #ifdef HEARTBEAT_FREQ
1004 #define N_DOMS 16 // number of domains tracked for the heartbeat
1005 #if 0
1006 static long count[N_DOMS] = { 0 };
1007 #endif
1008 static long nonclockcount[N_DOMS] = { 0 };
1009 unsigned domid = vcpu->domain->domain_id;
1010 #endif
1011 #ifdef IRQ_DEBUG
1012 static char firstivr = 1;
1013 static char firsttime[256];
1014 if (firstivr) {
1015 int i;
1016 for (i = 0; i < 256; i++)
1017 firsttime[i] = 1;
1018 firstivr = 0;
1020 #endif
1022 vector = vcpu_check_pending_interrupts(vcpu);
1023 if (vector == SPURIOUS_VECTOR) {
1024 PSCB(vcpu, pending_interruption) = 0;
1025 *pval = vector;
1026 return IA64_NO_FAULT;
1028 #ifdef HEARTBEAT_FREQ
1029 if (domid >= N_DOMS)
1030 domid = N_DOMS - 1;
1031 #if 0
1032 if (vector == (PSCB(vcpu, itv) & 0xff)) {
1033 if (!(++count[domid] & ((HEARTBEAT_FREQ * 1024) - 1))) {
1034 printk("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
1035 domid, count[domid], nonclockcount[domid]);
1036 //count[domid] = 0;
1037 //dump_runq();
1040 #endif
1041 else
1042 nonclockcount[domid]++;
1043 #endif
1044 // now have an unmasked, pending, deliverable vector!
1045 // getting ivr has "side effects"
1046 #ifdef IRQ_DEBUG
1047 if (firsttime[vector]) {
1048 printk("*** First get_ivr on vector=%lu,itc=%lx\n",
1049 vector, ia64_get_itc());
1050 firsttime[vector] = 0;
1052 #endif
1053 /* if delivering a timer interrupt, remember domain_itm, which
1054 * needs to be done before clearing irr
1055 */
1056 if (vector == (PSCB(vcpu, itv) & 0xff)) {
1057 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
1060 i = vector >> 6;
1061 mask = 1L << (vector & 0x3f);
1062 //printk("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
1063 PSCBX(vcpu, insvc[i]) |= mask;
1064 PSCBX(vcpu, irr[i]) &= ~mask;
1065 //PSCB(vcpu,pending_interruption)--;
1066 *pval = vector;
1067 return IA64_NO_FAULT;
1070 IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval)
1072 *pval = PSCB(vcpu, tpr);
1073 return IA64_NO_FAULT;
1076 IA64FAULT vcpu_get_eoi(VCPU * vcpu, u64 * pval)
1078 *pval = 0L; // reads of eoi always return 0
1079 return IA64_NO_FAULT;
1082 IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval)
1084 *pval = PSCBX(vcpu, irr[0]);
1085 return IA64_NO_FAULT;
1088 IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval)
1090 *pval = PSCBX(vcpu, irr[1]);
1091 return IA64_NO_FAULT;
1094 IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval)
1096 *pval = PSCBX(vcpu, irr[2]);
1097 return IA64_NO_FAULT;
1100 IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval)
1102 *pval = PSCBX(vcpu, irr[3]);
1103 return IA64_NO_FAULT;
1106 IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval)
1108 *pval = PSCB(vcpu, itv);
1109 return IA64_NO_FAULT;
1112 IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval)
1114 *pval = PSCB(vcpu, pmv);
1115 return IA64_NO_FAULT;
1118 IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
1120 *pval = PSCB(vcpu, cmcv);
1121 return IA64_NO_FAULT;
1124 IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
1126 // fix this when setting values other than m-bit is supported
1127 gdprintk(XENLOG_DEBUG,
1128 "vcpu_get_lrr0: Unmasked interrupts unsupported\n");
1129 *pval = (1L << 16);
1130 return IA64_NO_FAULT;
1133 IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
1135 // fix this when setting values other than m-bit is supported
1136 gdprintk(XENLOG_DEBUG,
1137 "vcpu_get_lrr1: Unmasked interrupts unsupported\n");
1138 *pval = (1L << 16);
1139 return IA64_NO_FAULT;
1142 IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val)
1144 printk("vcpu_set_lid: Setting cr.lid is unsupported\n");
1145 return IA64_ILLOP_FAULT;
1148 IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val)
1150 if (val & 0xff00)
1151 return IA64_RSVDREG_FAULT;
1152 PSCB(vcpu, tpr) = val;
1153 /* This can unmask interrupts. */
1154 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
1155 PSCB(vcpu, pending_interruption) = 1;
1156 return IA64_NO_FAULT;
1159 IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val)
1161 u64 *p, bits, vec, bitnum;
1162 int i;
1164 p = &PSCBX(vcpu, insvc[3]);
1165 for (i = 3; (i >= 0) && !(bits = *p); i--, p--)
1167 if (i < 0) {
1168 printk("Trying to EOI interrupt when none are in-service.\n");
1169 return IA64_NO_FAULT;
1171 bitnum = ia64_fls(bits);
1172 vec = bitnum + (i * 64);
1173 /* clear the correct bit */
1174 bits &= ~(1L << bitnum);
1175 *p = bits;
1176 /* clearing an eoi bit may unmask another pending interrupt... */
1177 if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
1178 // worry about this later... Linux only calls eoi
1179 // with interrupts disabled
1180 printk("Trying to EOI interrupt with interrupts enabled\n");
1182 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
1183 PSCB(vcpu, pending_interruption) = 1;
1184 //printk("YYYYY vcpu_set_eoi: Successful\n");
1185 return IA64_NO_FAULT;
1188 IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val)
1190 if (!(val & (1L << 16))) {
1191 printk("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
1192 return IA64_ILLOP_FAULT;
1194 // no place to save this state but nothing to do anyway
1195 return IA64_NO_FAULT;
1198 IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val)
1200 if (!(val & (1L << 16))) {
1201 printk("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
1202 return IA64_ILLOP_FAULT;
1204 // no place to save this state but nothing to do anyway
1205 return IA64_NO_FAULT;
1208 IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val)
1210 /* Check reserved fields. */
1211 if (val & 0xef00)
1212 return IA64_ILLOP_FAULT;
1213 PSCB(vcpu, itv) = val;
1214 if (val & 0x10000) {
1215 /* Disable itm. */
1216 PSCBX(vcpu, domain_itm) = 0;
1217 } else
1218 vcpu_set_next_timer(vcpu);
1219 return IA64_NO_FAULT;
1222 IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val)
1224 if (val & 0xef00) /* reserved fields */
1225 return IA64_RSVDREG_FAULT;
1226 PSCB(vcpu, pmv) = val;
1227 return IA64_NO_FAULT;
1230 IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val)
1232 if (val & 0xef00) /* reserved fields */
1233 return IA64_RSVDREG_FAULT;
1234 PSCB(vcpu, cmcv) = val;
1235 return IA64_NO_FAULT;
1238 /**************************************************************************
1239 VCPU temporary register access routines
1240 **************************************************************************/
1241 u64 vcpu_get_tmp(VCPU * vcpu, u64 index)
1243 if (index > 7)
1244 return 0;
1245 return PSCB(vcpu, tmp[index]);
1248 void vcpu_set_tmp(VCPU * vcpu, u64 index, u64 val)
1250 if (index <= 7)
1251 PSCB(vcpu, tmp[index]) = val;
1254 /**************************************************************************
1255 Interval timer routines
1256 **************************************************************************/
1258 BOOLEAN vcpu_timer_disabled(VCPU * vcpu)
1260 u64 itv = PSCB(vcpu, itv);
1261 return (!itv || !!(itv & 0x10000));
1264 BOOLEAN vcpu_timer_inservice(VCPU * vcpu)
1266 u64 itv = PSCB(vcpu, itv);
1267 return test_bit(itv, PSCBX(vcpu, insvc));
1270 BOOLEAN vcpu_timer_expired(VCPU * vcpu)
1272 unsigned long domain_itm = PSCBX(vcpu, domain_itm);
1273 unsigned long now = ia64_get_itc();
1275 if (!domain_itm)
1276 return FALSE;
1277 if (now < domain_itm)
1278 return FALSE;
1279 if (vcpu_timer_disabled(vcpu))
1280 return FALSE;
1281 return TRUE;
1284 void vcpu_safe_set_itm(unsigned long val)
1286 unsigned long epsilon = 100;
1287 unsigned long flags;
1288 u64 now = ia64_get_itc();
1290 local_irq_save(flags);
1291 while (1) {
1292 //printk("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1293 ia64_set_itm(val);
1294 if (val > (now = ia64_get_itc()))
1295 break;
1296 val = now + epsilon;
1297 epsilon <<= 1;
1299 local_irq_restore(flags);
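/*
 * Added note: cr.itm only fires if it is set to a value strictly in the
 * future, so the loop above retries with an exponentially growing epsilon
 * until the written value is still ahead of ar.itc.
 */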
1302 void vcpu_set_next_timer(VCPU * vcpu)
1304 u64 d = PSCBX(vcpu, domain_itm);
1305 //u64 s = PSCBX(vcpu,xen_itm);
1306 u64 s = local_cpu_data->itm_next;
1307 u64 now = ia64_get_itc();
1309 /* gloss over the wraparound problem for now... we know it exists
1310 * but it doesn't matter right now */
1312 if (is_idle_domain(vcpu->domain)) {
1313 // printk("****** vcpu_set_next_timer called during idle!!\n");
1314 vcpu_safe_set_itm(s);
1315 return;
1317 //s = PSCBX(vcpu,xen_itm);
1318 if (d && (d > now) && (d < s)) {
1319 vcpu_safe_set_itm(d);
1320 //using_domain_as_itm++;
1321 } else {
1322 vcpu_safe_set_itm(s);
1323 //using_xen_as_itm++;
1327 IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val)
1329 //UINT now = ia64_get_itc();
1331 //if (val < now) val = now + 1000;
1332 //printk("*** vcpu_set_itm: called with %lx\n",val);
1333 PSCBX(vcpu, domain_itm) = val;
1334 vcpu_set_next_timer(vcpu);
1335 return IA64_NO_FAULT;
1338 IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
1340 #define DISALLOW_SETTING_ITC_FOR_NOW
1341 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1342 static int did_print;
1343 if (!did_print) {
1344 printk("vcpu_set_itc: Setting ar.itc is currently disabled "
1345 "(this message is only displayed once)\n");
1346 did_print = 1;
1348 #else
1349 u64 oldnow = ia64_get_itc();
1350 u64 olditm = PSCBX(vcpu, domain_itm);
1351 unsigned long d = olditm - oldnow;
1352 unsigned long x = local_cpu_data->itm_next - oldnow;
1354 u64 newnow = val, min_delta;
1356 local_irq_disable();
1357 if (olditm) {
1358 printk("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val,
1359 newnow + d);
1360 PSCBX(vcpu, domain_itm) = newnow + d;
1362 local_cpu_data->itm_next = newnow + x;
1363 d = PSCBX(vcpu, domain_itm);
1364 x = local_cpu_data->itm_next;
1366 ia64_set_itc(newnow);
1367 if (d && (d > newnow) && (d < x)) {
1368 vcpu_safe_set_itm(d);
1369 //using_domain_as_itm++;
1370 } else {
1371 vcpu_safe_set_itm(x);
1372 //using_xen_as_itm++;
1374 local_irq_enable();
1375 #endif
1376 return IA64_NO_FAULT;
1379 IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval)
1381 //FIXME: Implement this
1382 printk("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1383 return IA64_NO_FAULT;
1384 //return IA64_ILLOP_FAULT;
1387 IA64FAULT vcpu_get_itc(VCPU * vcpu, u64 * pval)
1389 //TODO: Implement this
1390 printk("vcpu_get_itc: Getting ar.itc is unsupported\n");
1391 return IA64_ILLOP_FAULT;
1394 void vcpu_pend_timer(VCPU * vcpu)
1396 u64 itv = PSCB(vcpu, itv) & 0xff;
1398 if (vcpu_timer_disabled(vcpu))
1399 return;
1400 //if (vcpu_timer_inservice(vcpu)) return;
1401 if (PSCBX(vcpu, domain_itm_last) == PSCBX(vcpu, domain_itm)) {
1402 // already delivered an interrupt for this so
1403 // don't deliver another
1404 return;
1406 if (vcpu->arch.event_callback_ip) {
1407 /* A small window may occur when injecting a vIRQ while the related
1408 * handler has not been registered yet. Don't fire in that case.
1409 */
1410 if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
1411 send_guest_vcpu_virq(vcpu, VIRQ_ITC);
1412 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
1414 } else
1415 vcpu_pend_interrupt(vcpu, itv);
1418 // returns true if ready to deliver a timer interrupt too early
1419 u64 vcpu_timer_pending_early(VCPU * vcpu)
1421 u64 now = ia64_get_itc();
1422 u64 itm = PSCBX(vcpu, domain_itm);
1424 if (vcpu_timer_disabled(vcpu))
1425 return 0;
1426 if (!itm)
1427 return 0;
1428 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1431 /**************************************************************************
1432 Privileged operation emulation routines
1433 **************************************************************************/
1435 static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
1437 PSCB(vcpu, ifa) = ifa;
1438 PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
1439 vcpu_thash(current, ifa, &PSCB(current, iha));
1442 IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
1444 vcpu_force_tlb_miss(vcpu, ifa);
1445 return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
1446 IA64_ALT_INST_TLB_VECTOR;
1449 IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
1451 vcpu_force_tlb_miss(vcpu, ifa);
1452 return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
1453 IA64_ALT_DATA_TLB_VECTOR;
1456 IA64FAULT vcpu_rfi(VCPU * vcpu)
1458 u64 ifs;
1459 REGS *regs = vcpu_regs(vcpu);
1461 vcpu_set_psr(vcpu, PSCB(vcpu, ipsr));
1463 ifs = PSCB(vcpu, ifs);
1464 if (ifs & 0x8000000000000000UL)
1465 regs->cr_ifs = ifs;
1467 regs->cr_iip = PSCB(vcpu, iip);
1469 return IA64_NO_FAULT;
1472 IA64FAULT vcpu_cover(VCPU * vcpu)
1474 // TODO: Only allowed for current vcpu
1475 REGS *regs = vcpu_regs(vcpu);
1477 if (!PSCB(vcpu, interrupt_collection_enabled)) {
1478 PSCB(vcpu, ifs) = regs->cr_ifs;
1480 regs->cr_ifs = 0;
1481 return IA64_NO_FAULT;
1484 IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
1486 u64 pta = PSCB(vcpu, pta);
1487 u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1488 u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
1489 u64 Mask = (1L << pta_sz) - 1;
1490 u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1491 u64 compMask_60_15 = ~Mask_60_15;
1492 u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
1493 u64 VHPT_offset = (vadr >> rr_ps) << 3;
1494 u64 VHPT_addr1 = vadr & 0xe000000000000000L;
1495 u64 VHPT_addr2a =
1496 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1497 u64 VHPT_addr2b =
1498 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1499 u64 VHPT_addr3 = VHPT_offset & 0x7fff;
1500 u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1501 VHPT_addr3;
1503 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1504 *pval = VHPT_addr;
1505 return IA64_NO_FAULT;
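/*
 * Added note on the hash above: the low pta.size bits of the result come
 * from the VHPT offset ((vadr >> rr.ps) * 8), bits pta.size..60 come from
 * pta.base, and bits 61..63 keep the region number of vadr. For example,
 * with pta.size = 18, only bits 3..17 of the hash vary with the faulting
 * address within a given region.
 */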
1508 IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
1510 printk("vcpu_ttag: ttag instruction unsupported\n");
1511 return IA64_ILLOP_FAULT;
1514 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1516 /* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlaps. */
1517 static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
1519 return (b1 <= e2) && (e1 >= b2);
1522 /* Crash domain if [base, base + page_size] and Xen virtual space overlap.
1523 Note: LSBs of base inside page_size are ignored. */
1524 static inline void
1525 check_xen_space_overlap(const char *func, u64 base, u64 page_size)
1527 /* Overlaps can occur only in region 7.
1528 (This is an optimization to bypass all the checks). */
1529 if (REGION_NUMBER(base) != 7)
1530 return;
1532 /* Mask LSBs of base. */
1533 base &= ~(page_size - 1);
1535 /* FIXME: ideally an MCA should be generated... */
1536 if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
1537 base, base + page_size)
1538 || range_overlap(current->domain->arch.shared_info_va,
1539 current->domain->arch.shared_info_va
1540 + XSI_SIZE + XMAPPEDREGS_SIZE,
1541 base, base + page_size))
1542 panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
1543 func, base);
1546 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
1547 static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
1548 u64 rid)
1550 return trp->rid == rid
1551 && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
1554 static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
1556 return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
1559 static inline int
1560 vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
1562 return trp->rid == rid
1563 && trp->pte.p
1564 && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
1568 static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
1569 BOOLEAN is_data)
1571 unsigned char *regions;
1572 TR_ENTRY *trp;
1573 int tr_max;
1574 int i;
1576 if (is_data) {
1577 // data
1578 regions = &vcpu->arch.dtr_regions;
1579 trp = vcpu->arch.dtrs;
1580 tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
1581 } else {
1582 // instruction
1583 regions = &vcpu->arch.itr_regions;
1584 trp = vcpu->arch.itrs;
1585 tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
1588 if (!vcpu_quick_region_check(*regions, va)) {
1589 return NULL;
1591 for (i = 0; i < tr_max; i++, trp++) {
1592 if (vcpu_match_tr_entry(trp, va, rid)) {
1593 return trp;
1596 return NULL;
1599 // return value
1600 // 0: failure
1601 // 1: success
1602 int
1603 vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
1604 IA64_BUNDLE * bundle)
1606 u64 gpip; // guest pseudo physical ip
1607 unsigned long vaddr;
1608 struct page_info *page;
1610 again:
1611 #if 0
1612 // Currently xen doesn't track psr.it bits.
1613 // it assumes always psr.it = 1.
1614 if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
1615 gpip = gip;
1616 } else
1617 #endif
1619 unsigned long region = REGION_NUMBER(gip);
1620 unsigned long rr = PSCB(vcpu, rrs)[region];
1621 unsigned long rid = rr & RR_RID_MASK;
1622 BOOLEAN swap_rr0;
1623 TR_ENTRY *trp;
1625 // vcpu->arch.{i, d}tlb are volatile,
1626 // so copy the value to the local variable tr before use.
1627 TR_ENTRY tr;
1629 trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
1630 if (trp != NULL) {
1631 tr = *trp;
1632 goto found;
1634 // When a bundle couldn't be fetched, an itlb miss is reflected to the guest.
1635 // The last itc.i value is cached in PSCBX(vcpu, itlb).
1636 tr = PSCBX(vcpu, itlb);
1637 if (vcpu_match_tr_entry(&tr, gip, rid)) {
1638 //dprintk(XENLOG_WARNING,
1639 // "%s gip 0x%lx gpip 0x%lx\n", __func__,
1640 // gip, gpip);
1641 goto found;
1643 trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
1644 if (trp != NULL) {
1645 tr = *trp;
1646 goto found;
1648 #if 0
1649 tr = PSCBX(vcpu, dtlb);
1650 if (vcpu_match_tr_entry(&tr, gip, rid)) {
1651 goto found;
1653 #endif
1655 // try to access gip with guest virtual address
1656 // This may cause tlb miss. see vcpu_translate(). Be careful!
1657 swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
1658 if (swap_rr0) {
1659 set_virtual_rr0();
1661 *bundle = __get_domain_bundle(gip);
1662 if (swap_rr0) {
1663 set_metaphysical_rr0();
1665 if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
1666 dprintk(XENLOG_INFO, "%s gip 0x%lx\n", __func__, gip);
1667 return 0;
1669 return 1;
1671 found:
1672 gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
1673 (gip & ((1 << tr.ps) - 1));
1676 vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
1677 page = virt_to_page(vaddr);
1678 if (get_page(page, vcpu->domain) == 0) {
1679 if (page_get_owner(page) != vcpu->domain) {
1680 // This page might be a page granted by another
1681 // domain.
1682 panic_domain(regs, "domain tries to execute foreign "
1683 "domain page which might be mapped by "
1684 "grant table.\n");
1686 goto again;
1688 *bundle = *((IA64_BUNDLE *) vaddr);
1689 put_page(page);
1690 return 1;
1693 IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
1694 u64 * pteval, u64 * itir, u64 * iha)
1696 unsigned long region = address >> 61;
1697 unsigned long pta, rid, rr, key = 0;
1698 union pte_flags pte;
1699 TR_ENTRY *trp;
1701 if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
1702 // dom0 may generate an uncacheable physical address (msb=1)
1703 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1704 // FIXME: This seems to happen even though it shouldn't. Need to track
1705 // this down, but since it has apparently been harmless, just flag it for now
1706 // panic_domain(vcpu_regs(vcpu),
1708 /*
1709 * The guest may execute itc.d and rfi with psr.dt=0.
1710 * When the VMM then tries to fetch the opcode, a tlb miss may
1711 * happen. At this point PSCB(vcpu,metaphysical_mode)=1 and
1712 * region=5, so the VMM needs to handle this tlb miss as if
1713 * PSCB(vcpu,metaphysical_mode)=0
1714 */
1715 printk("vcpu_translate: bad physical address: 0x%lx "
1716 "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);
1718 } else {
1719 *pteval = (address & _PAGE_PPN_MASK) |
1720 __DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
1721 *itir = vcpu->arch.vhpt_pg_shift << 2;
1722 perfc_incr(phys_translate);
1723 return IA64_NO_FAULT;
1725 } else if (!region && warn_region0_address) {
1726 REGS *regs = vcpu_regs(vcpu);
1727 unsigned long viip = PSCB(vcpu, iip);
1728 unsigned long vipsr = PSCB(vcpu, ipsr);
1729 unsigned long iip = regs->cr_iip;
1730 unsigned long ipsr = regs->cr_ipsr;
1731 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
1732 "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
1733 address, viip, vipsr, iip, ipsr);
1736 rr = PSCB(vcpu, rrs)[region];
1737 rid = rr & RR_RID_MASK;
1738 if (is_data) {
1739 trp = vcpu_tr_lookup(vcpu, address, rid, 1);
1740 if (trp != NULL) {
1741 *pteval = trp->pte.val;
1742 *itir = trp->itir;
1743 perfc_incr(tr_translate);
1744 return IA64_NO_FAULT;
1747 // FIXME?: check itr's for data accesses too, else bad things happen?
1748 /* else */ {
1749 trp = vcpu_tr_lookup(vcpu, address, rid, 0);
1750 if (trp != NULL) {
1751 *pteval = trp->pte.val;
1752 *itir = trp->itir;
1753 perfc_incr(tr_translate);
1754 return IA64_NO_FAULT;
1758 /* check 1-entry TLB */
1759 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1760 trp = &vcpu->arch.dtlb;
1761 pte = trp->pte;
1762 if ( /* is_data && */ pte.p
1763 && vcpu_match_tr_entry_no_p(trp, address, rid)) {
1764 *pteval = pte.val;
1765 *itir = trp->itir;
1766 perfc_incr(dtlb_translate);
1767 return IA64_USE_TLB;
1770 /* check guest VHPT */
1771 pta = PSCB(vcpu, pta);
1773 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1774 // note: architecturally, iha is only optionally set for alt faults, but
1775 // xenlinux depends on it, so it should be documented as part of the PV interface
1776 vcpu_thash(vcpu, address, iha);
1777 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
1778 REGS *regs = vcpu_regs(vcpu);
1779 struct opt_feature* optf = &(vcpu->domain->arch.opt_feature);
1781 /* Optimization for identity mapped region 7 OS (linux) */
1782 if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7_FLG &&
1783 region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
1784 pte.val = address & _PAGE_PPN_MASK;
1785 pte.val = pte.val | optf->im_reg7.pgprot;
1786 key = optf->im_reg7.key;
1787 goto out;
1789 return is_data ? IA64_ALT_DATA_TLB_VECTOR :
1790 IA64_ALT_INST_TLB_VECTOR;
1793 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1794 /*
1795 * minimal support: vhpt walker is really dumb and won't find
1796 * anything
1797 */
1798 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1800 /* avoid recursively walking (short format) VHPT */
1801 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1802 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1804 if (!__access_ok(*iha)
1805 || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1806 // virtual VHPT walker "missed" in TLB
1807 return IA64_VHPT_FAULT;
1809 /*
1810 * Optimisation: this VHPT walker aborts on not-present pages
1811 * instead of inserting a not-present translation; this allows
1812 * vectoring directly to the miss handler.
1813 */
1814 if (!pte.p)
1815 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1817 /* found mapping in guest VHPT! */
1818 out:
1819 *itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
1820 *pteval = pte.val;
1821 perfc_incr(vhpt_translate);
1822 return IA64_NO_FAULT;
1825 IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
1827 u64 pteval, itir, mask, iha;
1828 IA64FAULT fault;
1830 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
1831 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
1832 mask = itir_mask(itir);
1833 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1834 return IA64_NO_FAULT;
1836 return vcpu_force_data_miss(vcpu, vadr);
1839 IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
1841 u64 pteval, itir, iha;
1842 IA64FAULT fault;
1844 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
1845 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
1846 *key = itir & IA64_ITIR_KEY_MASK;
1847 else
1848 *key = 1;
1850 return IA64_NO_FAULT;
1853 /**************************************************************************
1854 VCPU debug breakpoint register access routines
1855 **************************************************************************/
1857 IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
1859 if (reg >= IA64_NUM_DBG_REGS)
1860 return IA64_RSVDREG_FAULT;
1861 if ((reg & 1) == 0) {
1862 /* Validate address. */
1863 if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
1864 return IA64_ILLOP_FAULT;
1865 } else {
1866 if (!VMX_DOMAIN(vcpu)) {
1867 /* Mask PL0. */
1868 val &= ~(1UL << 56);
1871 if (val != 0)
1872 vcpu->arch.dbg_used |= (1 << reg);
1873 else
1874 vcpu->arch.dbg_used &= ~(1 << reg);
1875 vcpu->arch.dbr[reg] = val;
1876 if (vcpu == current)
1877 ia64_set_dbr(reg, val);
1878 return IA64_NO_FAULT;
1881 IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
1883 if (reg >= IA64_NUM_DBG_REGS)
1884 return IA64_RSVDREG_FAULT;
1885 if ((reg & 1) == 0) {
1886 /* Validate address. */
1887 if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
1888 return IA64_ILLOP_FAULT;
1889 } else {
1890 if (!VMX_DOMAIN(vcpu)) {
1891 /* Mask PL0. */
1892 val &= ~(1UL << 56);
1895 if (val != 0)
1896 vcpu->arch.dbg_used |= (1 << (reg + IA64_NUM_DBG_REGS));
1897 else
1898 vcpu->arch.dbg_used &= ~(1 << (reg + IA64_NUM_DBG_REGS));
1899 vcpu->arch.ibr[reg] = val;
1900 if (vcpu == current)
1901 ia64_set_ibr(reg, val);
1902 return IA64_NO_FAULT;
1905 IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
1907 if (reg >= IA64_NUM_DBG_REGS)
1908 return IA64_RSVDREG_FAULT;
1909 *pval = vcpu->arch.dbr[reg];
1910 return IA64_NO_FAULT;
1913 IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
1915 if (reg >= IA64_NUM_DBG_REGS)
1916 return IA64_RSVDREG_FAULT;
1917 *pval = vcpu->arch.ibr[reg];
1918 return IA64_NO_FAULT;
1921 /**************************************************************************
1922 VCPU performance monitor register access routines
1923 **************************************************************************/
1925 IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
1927 // TODO: Should set Logical CPU state, not just physical
1928 // NOTE: Writes to unimplemented PMC registers are discarded
1929 #ifdef DEBUG_PFMON
1930 printk("vcpu_set_pmc(%x,%lx)\n", reg, val);
1931 #endif
1932 ia64_set_pmc(reg, val);
1933 return IA64_NO_FAULT;
1936 IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
1938 // TODO: Should set Logical CPU state, not just physical
1939 // NOTE: Writes to unimplemented PMD registers are discarded
1940 #ifdef DEBUG_PFMON
1941 printk("vcpu_set_pmd(%x,%lx)\n", reg, val);
1942 #endif
1943 ia64_set_pmd(reg, val);
1944 return IA64_NO_FAULT;
1947 IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
1949 // NOTE: Reads from unimplemented PMC registers return zero
1950 u64 val = (u64) ia64_get_pmc(reg);
1951 #ifdef DEBUG_PFMON
1952 printk("%lx=vcpu_get_pmc(%x)\n", val, reg);
1953 #endif
1954 *pval = val;
1955 return IA64_NO_FAULT;
1958 IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
1960 // NOTE: Reads from unimplemented PMD registers return zero
1961 u64 val = (u64) ia64_get_pmd(reg);
1962 #ifdef DEBUG_PFMON
1963 printk("%lx=vcpu_get_pmd(%x)\n", val, reg);
1964 #endif
1965 *pval = val;
1966 return IA64_NO_FAULT;
1969 /**************************************************************************
1970 VCPU banked general register access routines
1971 **************************************************************************/
1972 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1973 do{ \
1974 __asm__ __volatile__ ( \
1975 ";;extr.u %0 = %3,%6,16;;\n" \
1976 "dep %1 = %0, %1, 0, 16;;\n" \
1977 "st8 [%4] = %1\n" \
1978 "extr.u %0 = %2, 16, 16;;\n" \
1979 "dep %3 = %0, %3, %6, 16;;\n" \
1980 "st8 [%5] = %3\n" \
1981 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1982 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1983 }while(0)
1985 IA64FAULT vcpu_bsw0(VCPU * vcpu)
1987 // TODO: Only allowed for current vcpu
1988 REGS *regs = vcpu_regs(vcpu);
1989 unsigned long *r = &regs->r16;
1990 unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
1991 unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
1992 unsigned long *runat = &regs->eml_unat;
1993 unsigned long *b0unat = &PSCB(vcpu, vbnat);
1994 unsigned long *b1unat = &PSCB(vcpu, vnat);
1996 unsigned long i;
1998 if (VMX_DOMAIN(vcpu)) {
1999 if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
2000 for (i = 0; i < 16; i++) {
2001 *b1++ = *r;
2002 *r++ = *b0++;
2004 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
2005 IA64_PT_REGS_R16_SLOT);
2006 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
2008 } else {
2009 if (PSCB(vcpu, banknum)) {
2010 for (i = 0; i < 16; i++) {
2011 *b1++ = *r;
2012 *r++ = *b0++;
2014 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
2015 IA64_PT_REGS_R16_SLOT);
2016 PSCB(vcpu, banknum) = 0;
2019 return IA64_NO_FAULT;
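/*
 * Added note: the loop above emulates bsw.0 by spilling the live r16-r31
 * into the bank1 save area and loading the bank0 copies, while the asm
 * helper patches the matching NaT bits in regs->eml_unat.
 */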
2022 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT) \
2023 do { \
2024 __asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n" \
2025 "dep %1 = %0, %1, 16, 16;;\n" \
2026 "st8 [%4] = %1\n" \
2027 "extr.u %0 = %2, 0, 16;;\n" \
2028 "dep %3 = %0, %3, %6, 16;;\n" \
2029 "st8 [%5] = %3\n" \
2030 ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
2031 "r"(*runat), "r"(b0unat), "r"(runat), \
2032 "i"(IA64_PT_REGS_R16_SLOT): "memory"); \
2033 } while(0)
2035 IA64FAULT vcpu_bsw1(VCPU * vcpu)
2037 // TODO: Only allowed for current vcpu
2038 REGS *regs = vcpu_regs(vcpu);
2039 unsigned long *r = &regs->r16;
2040 unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
2041 unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
2042 unsigned long *runat = &regs->eml_unat;
2043 unsigned long *b0unat = &PSCB(vcpu, vbnat);
2044 unsigned long *b1unat = &PSCB(vcpu, vnat);
2046 unsigned long i;
2048 if (VMX_DOMAIN(vcpu)) {
2049 if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
2050 for (i = 0; i < 16; i++) {
2051 *b0++ = *r;
2052 *r++ = *b1++;
2054 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
2055 IA64_PT_REGS_R16_SLOT);
2056 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
2058 } else {
2059 if (!PSCB(vcpu, banknum)) {
2060 for (i = 0; i < 16; i++) {
2061 *b0++ = *r;
2062 *r++ = *b1++;
2064 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
2065 IA64_PT_REGS_R16_SLOT);
2066 PSCB(vcpu, banknum) = 1;
2069 return IA64_NO_FAULT;
2072 /**************************************************************************
2073 VCPU cpuid access routines
2074 **************************************************************************/
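/*
 * Background note (from the architecture, not from this file): CPUID
 * registers 0 and 1 hold the 16-byte vendor string, and CPUID[3]
 * reports in its low byte how many CPUID registers are implemented;
 * hence the "Xen/ia64" string returned for register 0 and the
 * (ia64_get_cpuid(3) & 0xff) bound check below.
 */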
2076 IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
2077 {
2078 // FIXME: This could get called as a result of a rsvd-reg fault
2079 // if reg > 3
2080 switch (reg) {
2081 case 0:
2082 memcpy(pval, "Xen/ia64", 8);
2083 break;
2084 case 1:
2085 *pval = 0;
2086 break;
2087 case 2:
2088 *pval = 0;
2089 break;
2090 case 3:
2091 *pval = ia64_get_cpuid(3);
2092 break;
2093 case 4:
2094 *pval = ia64_get_cpuid(4);
2095 break;
2096 default:
2097 if (reg > (ia64_get_cpuid(3) & 0xff))
2098 return IA64_RSVDREG_FAULT;
2099 *pval = ia64_get_cpuid(reg);
2100 break;
2101 }
2102 return IA64_NO_FAULT;
2103 }
2105 /**************************************************************************
2106 VCPU region register access routines
2107 **************************************************************************/
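/*
 * Background note (from the architecture, not from this file): the
 * eight region registers are selected by bits 63:61 of a virtual
 * address, so every lookup below indexes rrs[] with (vadr >> 61),
 * e.g. rr.rrval = PSCB(vcpu, rrs)[vadr >> 61].
 */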
2109 unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
2110 {
2111 ia64_rr rr;
2113 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
2114 return rr.ve;
2115 }
2117 IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
2118 {
2119 if (unlikely(is_reserved_rr_field(vcpu, val))) {
2120 gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
2121 return IA64_RSVDREG_FAULT;
2122 }
2124 PSCB(vcpu, rrs)[reg >> 61] = val;
2125 if (likely(vcpu == current)) {
2126 int rc = set_one_rr(reg, val);
2127 BUG_ON(rc == 0);
2128 }
2129 return IA64_NO_FAULT;
2130 }
2132 IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
2133 {
2134 if (VMX_DOMAIN(vcpu))
2135 *pval = VMX(vcpu, vrr[reg >> 61]);
2136 else
2137 *pval = PSCB(vcpu, rrs)[reg >> 61];
2139 return IA64_NO_FAULT;
2140 }
2142 IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
2143 u64 val3, u64 val4)
2144 {
2145 u64 reg0 = 0x0000000000000000UL;
2146 u64 reg1 = 0x2000000000000000UL;
2147 u64 reg2 = 0x4000000000000000UL;
2148 u64 reg3 = 0x6000000000000000UL;
2149 u64 reg4 = 0x8000000000000000UL;
2151 if (unlikely(is_reserved_rr_field(vcpu, val0) ||
2152 is_reserved_rr_field(vcpu, val1) ||
2153 is_reserved_rr_field(vcpu, val2) ||
2154 is_reserved_rr_field(vcpu, val3) ||
2155 is_reserved_rr_field(vcpu, val4))) {
2156 gdprintk(XENLOG_DEBUG,
2157 "use of invalid rrval %lx %lx %lx %lx %lx\n",
2158 val0, val1, val2, val3, val4);
2159 return IA64_RSVDREG_FAULT;
2160 }
2162 PSCB(vcpu, rrs)[reg0 >> 61] = val0;
2163 PSCB(vcpu, rrs)[reg1 >> 61] = val1;
2164 PSCB(vcpu, rrs)[reg2 >> 61] = val2;
2165 PSCB(vcpu, rrs)[reg3 >> 61] = val3;
2166 PSCB(vcpu, rrs)[reg4 >> 61] = val4;
2167 if (likely(vcpu == current)) {
2168 int rc;
2169 rc = !set_one_rr(reg0, val0);
2170 rc |= !set_one_rr(reg1, val1);
2171 rc |= !set_one_rr(reg2, val2);
2172 rc |= !set_one_rr(reg3, val3);
2173 rc |= !set_one_rr(reg4, val4);
2174 BUG_ON(rc != 0);
2175 }
2176 return IA64_NO_FAULT;
2177 }
2179 /**************************************************************************
2180 VCPU protection key register access routines
2181 **************************************************************************/
2183 IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
2184 {
2185 if (reg >= XEN_IA64_NPKRS)
2186 return IA64_RSVDREG_FAULT; /* register index too large */
2188 *pval = (u64) PSCBX(vcpu, pkrs[reg]);
2189 return IA64_NO_FAULT;
2190 }
2192 IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
2193 {
2194 ia64_pkr_t pkr_new;
2196 if (reg >= XEN_IA64_NPKRS)
2197 return IA64_RSVDREG_FAULT; /* index too large */
2199 pkr_new.val = val;
2200 if (pkr_new.reserved1)
2201 return IA64_RSVDREG_FAULT; /* reserved field */
2203 if (pkr_new.reserved2)
2204 return IA64_RSVDREG_FAULT; /* reserved field */
2206 PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
2207 ia64_set_pkr(reg, pkr_new.val);
2209 return IA64_NO_FAULT;
2210 }
2212 /**************************************************************************
2213 VCPU translation register access routines
2214 **************************************************************************/
2216 static void
2217 vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
2218 u64 itir, u64 ifa, u64 rid)
2219 {
2220 u64 ps;
2221 union pte_flags new_pte;
2223 trp->itir = itir;
2224 trp->rid = rid;
2225 ps = trp->ps;
2226 new_pte.val = pte;
2227 if (new_pte.pl < CONFIG_CPL0_EMUL)
2228 new_pte.pl = CONFIG_CPL0_EMUL;
2229 trp->vadr = ifa & ~0xfff;
2230 if (ps > 12) { // "ignore" relevant low-order bits
2231 new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
2232 trp->vadr &= ~((1UL << ps) - 1);
2233 }
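/*
 * Worked example, assuming ps = 14 (a 16KB page): the low 14 - 12 = 2
 * bits of ppn (which counts 4KB units) and the low 14 bits of vadr
 * are cleared above, so both now point at the start of the 16KB page.
 */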
2235 /* Atomic write. */
2236 trp->pte.val = new_pte.val;
2237 }
2239 static inline void
2240 vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
2241 {
2242 vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
2243 VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
2244 }
2246 IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
2247 u64 itir, u64 ifa)
2248 {
2249 TR_ENTRY *trp;
2251 if (slot >= NDTRS)
2252 return IA64_RSVDREG_FAULT;
2254 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2256 trp = &PSCBX(vcpu, dtrs[slot]);
2257 //printk("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
2258 vcpu_set_tr_entry(trp, pte, itir, ifa);
2259 vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);
2261 /*
2262 * FIXME: According to the spec, the vhpt should be purged here, but
2263 * doing so incurs a considerable performance loss. Since it is safe
2264 * for linux not to purge the vhpt, the vhpt purge is disabled until
2265 * a feasible way is found.
2267 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
2268 */
2270 return IA64_NO_FAULT;
2271 }
2273 IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
2274 u64 itir, u64 ifa)
2275 {
2276 TR_ENTRY *trp;
2278 if (slot >= NITRS)
2279 return IA64_RSVDREG_FAULT;
2281 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2283 trp = &PSCBX(vcpu, itrs[slot]);
2284 //printk("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
2285 vcpu_set_tr_entry(trp, pte, itir, ifa);
2286 vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);
2288 /*
2289 * FIXME: According to the spec, the vhpt should be purged here, but
2290 * doing so incurs a considerable performance loss. Since it is safe
2291 * for linux not to purge the vhpt, the vhpt purge is disabled until
2292 * a feasible way is found.
2294 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
2295 */
2297 return IA64_NO_FAULT;
2298 }
2300 IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
2301 u64 itir, u64 ifa, u64 rid)
2302 {
2303 TR_ENTRY *trp;
2305 if (slot >= NITRS)
2306 return IA64_RSVDREG_FAULT;
2307 trp = &PSCBX(vcpu, itrs[slot]);
2308 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
2310 /* Recompute the itr_region. */
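/*
 * Note, assuming the usual meaning of itr_regions/dtr_regions in this
 * file: they are per-VCPU bitmaps with one bit per virtual region
 * (0-7), set via vcpu_quick_region_set() for every present TR, so
 * that purge paths can quickly skip regions that hold no TRs at all.
 */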
2311 vcpu->arch.itr_regions = 0;
2312 for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++)
2313 if (trp->pte.p)
2314 vcpu_quick_region_set(vcpu->arch.itr_regions,
2315 trp->vadr);
2316 return IA64_NO_FAULT;
2317 }
2319 IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
2320 u64 itir, u64 ifa, u64 rid)
2321 {
2322 TR_ENTRY *trp;
2324 if (slot >= NDTRS)
2325 return IA64_RSVDREG_FAULT;
2326 trp = &PSCBX(vcpu, dtrs[slot]);
2327 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
2329 /* Recompute the dtr_region. */
2330 vcpu->arch.dtr_regions = 0;
2331 for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++)
2332 if (trp->pte.p)
2333 vcpu_quick_region_set(vcpu->arch.dtr_regions,
2334 trp->vadr);
2335 return IA64_NO_FAULT;
2336 }
2338 /**************************************************************************
2339 VCPU translation cache access routines
2340 **************************************************************************/
2342 static void
2343 vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
2344 {
2345 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
2346 printk("vhpt rebuild: using page_shift %d\n", (int)ps);
2347 vcpu->arch.vhpt_pg_shift = ps;
2348 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2349 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2350 local_vhpt_flush();
2351 load_region_regs(vcpu);
2352 #else
2353 panic_domain(NULL, "domain trying to use smaller page size!\n");
2354 #endif
2355 }
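/*
 * Summary sketch of vcpu_itc_no_srlz() below, assuming ia64_itc()'s
 * convention that 1 selects the instruction TC and 2 the data TC
 * (matching the callers vcpu_itc_i/vcpu_itc_d): the translated pte is
 * inserted into the hardware TC with itc and also copied into the
 * VHPT via vhpt_insert()/vhpt_multiple_insert(), without a serialize;
 * the caller is expected to rfi (or srlz) afterwards.
 */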
2357 void
2358 vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
2359 u64 mp_pte, u64 itir, struct p2m_entry *entry)
2360 {
2361 ia64_itir_t _itir = {.itir = itir};
2362 unsigned long psr;
2364 check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
2366 // FIXME: must be inlined, or there is potential for a nested fault here!
2367 if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
2368 panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
2369 "smaller page size!\n");
2371 BUG_ON(_itir.ps > PAGE_SHIFT);
2372 vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
2373 psr = ia64_clear_ic();
2374 pte &= ~(_PAGE_RV2 | _PAGE_RV1); // Mask out the reserved bits.
2375 // FIXME: look for bigger mappings
2376 ia64_itc(IorD, vaddr, pte, _itir.itir);
2377 ia64_set_psr(psr);
2378 // ia64_srlz_i(); // no srlz req'd, will rfi later
2379 if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
2380 // FIXME: this is dangerous... vhpt_flush_address ensures these
2381 // addresses never get flushed. More work needed if this
2382 // ever happens.
2383 //printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
2384 if (_itir.ps > vcpu->arch.vhpt_pg_shift)
2385 vhpt_multiple_insert(vaddr, pte, _itir.itir);
2386 else
2387 vhpt_insert(vaddr, pte, _itir.itir);
2388 }
2389 // even if the domain page size is larger than PAGE_SIZE, just put
2390 // a PAGE_SIZE mapping in the vhpt for now, else purging is complicated
2391 else {
2392 vhpt_insert(vaddr, pte, _itir.itir);
2393 }
2394 }
2396 IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
2397 {
2398 unsigned long pteval;
2399 BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
2400 struct p2m_entry entry;
2401 ia64_itir_t _itir = {.itir = itir};
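/*
 * Note on swap_rr0, assuming the usual metaphysical-mode handling in
 * this file: while the guest runs in metaphysical (guest-physical)
 * mode, the machine rr0 holds the metaphysical RID, so for a region-0
 * ifa we temporarily switch to the virtual rr0 around the insert
 * (set_virtual_rr0()/set_metaphysical_rr0() below).
 */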
2403 if (_itir.ps < vcpu->arch.vhpt_pg_shift)
2404 vcpu_rebuild_vhpt(vcpu, _itir.ps);
2406 again:
2407 //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
2408 pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
2409 if (!pteval)
2410 return IA64_ILLOP_FAULT;
2411 if (swap_rr0)
2412 set_virtual_rr0();
2413 vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
2414 if (swap_rr0)
2415 set_metaphysical_rr0();
2416 if (p2m_entry_retry(&entry)) {
2417 vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
2418 goto again;
2419 }
2420 vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
2421 return IA64_NO_FAULT;
2422 }
2424 IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
2425 {
2426 unsigned long pteval;
2427 BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
2428 struct p2m_entry entry;
2429 ia64_itir_t _itir = {.itir = itir};
2431 if (_itir.ps < vcpu->arch.vhpt_pg_shift)
2432 vcpu_rebuild_vhpt(vcpu, _itir.ps);
2434 again:
2435 //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
2436 pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
2437 if (!pteval)
2438 return IA64_ILLOP_FAULT;
2439 if (swap_rr0)
2440 set_virtual_rr0();
2441 vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
2442 if (swap_rr0)
2443 set_metaphysical_rr0();
2444 if (p2m_entry_retry(&entry)) {
2445 vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
2446 goto again;
2447 }
2448 vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
2449 return IA64_NO_FAULT;
2450 }
2452 IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
2453 {
2454 BUG_ON(vcpu != current);
2456 check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);
2458 /* Purge TC */
2459 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2460 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2462 /* Purge all tlb and vhpt */
2463 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2465 return IA64_NO_FAULT;
2466 }
2468 // At privlvl=0, fc performs no access rights or protection key checks, while
2469 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
2470 // read but no protection key check. Thus in order to avoid an unexpected
2471 // access rights fault, we have to translate the virtual address to a
2472 // physical address (possibly via a metaphysical address) and do the fc
2473 // on the physical address, which is guaranteed to flush the same cache line
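// A minimal sketch of the resulting flow, assuming that
// translate_domain_mpaddr() maps a guest-physical ("metaphysical")
// address to a machine-physical one:
//	vcpu_tpa(vcpu, vadr, &mpaddr);                   // guest virtual -> guest physical
//	paddr = translate_domain_mpaddr(mpaddr, &entry); // guest physical -> machine physical
//	ia64_fc(__va(paddr));                            // fc on Xen's mapping of that line
// retried via p2m_entry_retry() if the p2m entry changed underneath.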
2474 IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
2475 {
2476 // TODO: Only allowed for current vcpu
2477 u64 mpaddr, paddr;
2478 IA64FAULT fault;
2480 again:
2481 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
2482 if (fault == IA64_NO_FAULT) {
2483 struct p2m_entry entry;
2484 paddr = translate_domain_mpaddr(mpaddr, &entry);
2485 ia64_fc(__va(paddr));
2486 if (p2m_entry_retry(&entry))
2487 goto again;
2488 }
2489 return fault;
2490 }
2492 IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
2493 {
2494 // Note that this only needs to be called once: the architected
2495 // loop that purges the entire TLB should use
2496 // base = stride1 = stride2 = 0, count0 = count1 = 1.
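// A sketch of the architected purge loop referred to above, with the
// base/stride/count values as reported by PAL_PTCE_INFO (not code
// from this file):
//	addr = base;
//	for (i = 0; i < count0; i++) {
//		for (j = 0; j < count1; j++) {
//			ptc_e(addr);
//			addr += stride2;
//		}
//		addr += stride1;
//	}
// With base = stride1 = stride2 = 0 and count0 = count1 = 1 this
// collapses to a single ptc.e, which is why one call here suffices.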
2498 vcpu_flush_vtlb_all(current);
2500 return IA64_NO_FAULT;
2501 }
2503 IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
2504 {
2505 printk("vcpu_ptc_g: called, not implemented yet\n");
2506 return IA64_ILLOP_FAULT;
2507 }
2509 IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
2510 {
2511 // FIXME: validate not flushing Xen addresses
2512 // if (Xen address) return(IA64_ILLOP_FAULT);
2513 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
2514 //printk("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
2516 check_xen_space_overlap("ptc_ga", vadr, addr_range);
2518 domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);
2520 return IA64_NO_FAULT;
2521 }
2523 IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
2524 {
2525 unsigned long region = vadr >> 61;
2526 u64 addr_range = 1UL << log_range;
2527 unsigned long rid, rr;
2528 int i;
2529 TR_ENTRY *trp;
2531 BUG_ON(vcpu != current);
2532 check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);
2534 rr = PSCB(vcpu, rrs)[region];
2535 rid = rr & RR_RID_MASK;
2537 /* Purge TC */
2538 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2540 /* Purge tr and recompute dtr_regions. */
2541 vcpu->arch.dtr_regions = 0;
2542 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
2543 if (vcpu_match_tr_entry_range
2544 (trp, rid, vadr, vadr + addr_range))
2545 vcpu_purge_tr_entry(trp);
2546 else if (trp->pte.p)
2547 vcpu_quick_region_set(vcpu->arch.dtr_regions,
2548 trp->vadr);
2550 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2552 return IA64_NO_FAULT;
2553 }
2555 IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
2556 {
2557 unsigned long region = vadr >> 61;
2558 u64 addr_range = 1UL << log_range;
2559 unsigned long rid, rr;
2560 int i;
2561 TR_ENTRY *trp;
2563 BUG_ON(vcpu != current);
2564 check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);
2566 rr = PSCB(vcpu, rrs)[region];
2567 rid = rr & RR_RID_MASK;
2569 /* Purge TC */
2570 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2572 /* Purge tr and recompute itr_regions. */
2573 vcpu->arch.itr_regions = 0;
2574 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
2575 if (vcpu_match_tr_entry_range
2576 (trp, rid, vadr, vadr + addr_range))
2577 vcpu_purge_tr_entry(trp);
2578 else if (trp->pte.p)
2579 vcpu_quick_region_set(vcpu->arch.itr_regions,
2580 trp->vadr);
2582 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2584 return IA64_NO_FAULT;