ia64/xen-unstable

xen/arch/ia64/xen/vcpu.c @ 15882:923795831f9a

[IA64] tak emulation, minor 4k-page correction

Signed-off-by: Juergen Gross <juergen.gross@fujitsu-siemens.com>
author Alex Williamson <alex.williamson@hp.com>
date Tue Sep 11 15:12:39 2007 -0600 (2007-09-11)
parents b5dbf184df6c
children 4c020dd76b18
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/xen.h>
11 #include <xen/mm.h>
12 #include <asm/ia64_int.h>
13 #include <asm/vcpu.h>
14 #include <asm/regionreg.h>
15 #include <asm/tlb.h>
16 #include <asm/processor.h>
17 #include <asm/delay.h>
18 #include <asm/vmx_vcpu.h>
19 #include <asm/vhpt.h>
20 #include <asm/tlbflush.h>
21 #include <asm/privop.h>
22 #include <xen/event.h>
23 #include <asm/vmx_phy_mode.h>
24 #include <asm/bundle.h>
25 #include <asm/privop_stat.h>
26 #include <asm/uaccess.h>
27 #include <asm/p2m_entry.h>
28 #include <asm/tlb_track.h>
30 /* FIXME: where should these declarations be? */
31 extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
32 struct pt_regs *regs);
33 extern void setreg(unsigned long regnum, unsigned long val, int nat,
34 struct pt_regs *regs);
35 extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
36 struct pt_regs *regs);
38 extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
39 struct pt_regs *regs);
41 typedef union {
42 struct ia64_psr ia64_psr;
43 unsigned long i64;
44 } PSR;
46 // this def for vcpu_regs won't work if kernel stack is present
47 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
49 #define IA64_PTA_SZ_BIT 2
50 #define IA64_PTA_VF_BIT 8
51 #define IA64_PTA_BASE_BIT 15
52 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
54 #define IA64_PSR_NON_VIRT_BITS \
55 (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | \
56 IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK | \
57 IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB | \
58 IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID | \
59 IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS | \
60 IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
62 unsigned long vcpu_verbose = 0;
64 /**************************************************************************
65 VCPU general register access routines
66 **************************************************************************/
67 #ifdef XEN
68 u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
69 {
70 REGS *regs = vcpu_regs(vcpu);
71 u64 val;
73 if (!reg)
74 return 0;
75 getreg(reg, &val, 0, regs); // FIXME: handle NATs later
76 return val;
77 }
79 IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
80 {
81 REGS *regs = vcpu_regs(vcpu);
82 int nat;
84 getreg(reg, val, &nat, regs); // FIXME: handle NATs later
85 if (nat)
86 return IA64_NAT_CONSUMPTION_VECTOR;
87 return 0;
88 }
90 // returns:
91 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
92 // IA64_NO_FAULT otherwise
93 IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
94 {
95 REGS *regs = vcpu_regs(vcpu);
96 long sof = (regs->cr_ifs) & 0x7f;
98 if (!reg)
99 return IA64_ILLOP_FAULT;
100 if (reg >= sof + 32)
101 return IA64_ILLOP_FAULT;
102 setreg(reg, value, nat, regs); // FIXME: handle NATs later
103 return IA64_NO_FAULT;
104 }
106 IA64FAULT
107 vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
108 {
109 REGS *regs = vcpu_regs(vcpu);
110 getfpreg(reg, val, regs); // FIXME: handle NATs later
111 return IA64_NO_FAULT;
112 }
114 IA64FAULT
115 vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
116 {
117 REGS *regs = vcpu_regs(vcpu);
118 if (reg > 1)
119 setfpreg(reg, val, regs); // FIXME: handle NATs later
120 return IA64_NO_FAULT;
121 }
123 #else
124 // returns:
125 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
126 // IA64_NO_FAULT otherwise
127 IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
128 {
129 REGS *regs = vcpu_regs(vcpu);
130 long sof = (regs->cr_ifs) & 0x7f;
132 if (!reg)
133 return IA64_ILLOP_FAULT;
134 if (reg >= sof + 32)
135 return IA64_ILLOP_FAULT;
136 setreg(reg, value, 0, regs); // FIXME: handle NATs later
137 return IA64_NO_FAULT;
138 }
140 #endif
142 void vcpu_init_regs(struct vcpu *v)
143 {
144 struct pt_regs *regs;
146 regs = vcpu_regs(v);
147 if (VMX_DOMAIN(v)) {
148 /* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
149 /* Need to be expanded as macro */
150 regs->cr_ipsr = 0x501008826008;
151 /* lazy fp */
152 FP_PSR(v) = IA64_PSR_DFH;
153 regs->cr_ipsr |= IA64_PSR_DFH;
154 } else {
155 regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
156 | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
157 regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
158 | IA64_PSR_RI | IA64_PSR_IS);
159 // domain runs at PL2
160 regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,IA64_PSR_CPL0_BIT);
161 // lazy fp
162 PSCB(v, hpsr_dfh) = 1;
163 PSCB(v, hpsr_mfh) = 0;
164 regs->cr_ipsr |= IA64_PSR_DFH;
165 }
166 regs->cr_ifs = 1UL << 63; /* or clear? */
167 regs->ar_fpsr = FPSR_DEFAULT;
169 if (VMX_DOMAIN(v)) {
170 vmx_init_all_rr(v);
171 /* Virtual processor context setup */
172 VCPU(v, vpsr) = IA64_PSR_BN;
173 VCPU(v, dcr) = 0;
174 } else {
175 init_all_rr(v);
176 regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
177 VCPU(v, banknum) = 1;
178 VCPU(v, metaphysical_mode) = 1;
179 VCPU(v, interrupt_mask_addr) =
180 (unsigned char *)v->domain->arch.shared_info_va +
181 INT_ENABLE_OFFSET(v);
182 VCPU(v, itv) = (1 << 16); /* timer vector masked */
184 v->vcpu_info->evtchn_upcall_pending = 0;
185 v->vcpu_info->evtchn_upcall_mask = -1;
186 }
188 /* pta.size must not be 0. The minimum is 15 (32k) */
189 VCPU(v, pta) = 15 << 2;
191 v->arch.domain_itm_last = -1L;
192 }
194 /**************************************************************************
195 VCPU privileged application register access routines
196 **************************************************************************/
198 void vcpu_load_kernel_regs(VCPU * vcpu)
199 {
200 ia64_set_kr(0, VCPU(vcpu, krs[0]));
201 ia64_set_kr(1, VCPU(vcpu, krs[1]));
202 ia64_set_kr(2, VCPU(vcpu, krs[2]));
203 ia64_set_kr(3, VCPU(vcpu, krs[3]));
204 ia64_set_kr(4, VCPU(vcpu, krs[4]));
205 ia64_set_kr(5, VCPU(vcpu, krs[5]));
206 ia64_set_kr(6, VCPU(vcpu, krs[6]));
207 ia64_set_kr(7, VCPU(vcpu, krs[7]));
208 }
210 /* GCC 4.0.2 seems unable to suppress this call! */
211 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
213 IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
214 {
215 if (reg == 44)
216 return vcpu_set_itc(vcpu, val);
217 else if (reg == 27)
218 return IA64_ILLOP_FAULT;
219 else if (reg == 24)
220 printk("warning: setting ar.eflg is a no-op; no IA-32 "
221 "support\n");
222 else if (reg > 7)
223 return IA64_ILLOP_FAULT;
224 else {
225 PSCB(vcpu, krs[reg]) = val;
226 ia64_set_kr(reg, val);
227 }
228 return IA64_NO_FAULT;
229 }
231 IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
232 {
233 if (reg == 24)
234 printk("warning: getting ar.eflg is a no-op; no IA-32 "
235 "support\n");
236 else if (reg > 7)
237 return IA64_ILLOP_FAULT;
238 else
239 *val = PSCB(vcpu, krs[reg]);
240 return IA64_NO_FAULT;
241 }
243 /**************************************************************************
244 VCPU protection key emulating for PV
245 This first implementation reserves one pkr for the hypervisor key.
246 On setting psr.pk the hypervisor key is loaded into pkr[15], so that the
247 hypervisor may run with psr.pk==1. The key for the hypervisor is 0.
248 Furthermore the VCPU is flagged to use the protection keys.
249 Currently the domU has to keep track of the keys it uses, because when
250 setting a pkr there is no check against the other pkrs whether that key
251 is already in use.
252 **************************************************************************/
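/*
 * Minimal usage sketch of the scheme above (an illustration only, assuming
 * XEN_IA64_NPKRS == 15 so that pkr[15] is the slot reserved for the
 * hypervisor key; guest_key is an arbitrary example value):
 *
 *	vcpu_set_pkr(vcpu, 3, guest_key);    // guest manages pkr[0]..pkr[14]
 *	vcpu_set_psr_sm(vcpu, IA64_PSR_PK);  // ssm psr.pk ends up in
 *	                                     // vcpu_pkr_set_psr_handling(),
 *	                                     // which loads XEN_IA64_PKR_VAL
 *	                                     // (hypervisor key 0) into pkr[15]
 *	// on context_switch(), vcpu_pkr_load_regs() reloads pkr[0..15]
 *	// from PSCBX(vcpu, pkrs[])
 */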
254 /* The function loads the protection key registers from the struct arch_vcpu
255 * into the processor pkr's! Called in context_switch().
256 * TODO: take care of the order of writing pkr's!
257 */
258 void vcpu_pkr_load_regs(VCPU * vcpu)
259 {
260 int i;
262 for (i = 0; i <= XEN_IA64_NPKRS; i++)
263 ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
264 }
266 /* The function activates the pkr handling. */
267 static void vcpu_pkr_set_psr_handling(VCPU * vcpu)
268 {
269 if (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE)
270 return;
272 vcpu_pkr_use_set(vcpu);
273 PSCBX(vcpu, pkrs[XEN_IA64_NPKRS]) = XEN_IA64_PKR_VAL;
275 /* Write the special key for the hypervisor into pkr[15]. */
276 ia64_set_pkr(XEN_IA64_NPKRS, XEN_IA64_PKR_VAL);
277 }
279 /**************************************************************************
280 VCPU processor status register access routines
281 **************************************************************************/
283 void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
284 {
285 /* only do something if mode changes */
286 if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
287 PSCB(vcpu, metaphysical_mode) = newmode;
288 if (newmode)
289 set_metaphysical_rr0();
290 else if (PSCB(vcpu, rrs[0]) != -1)
291 set_one_rr(0, PSCB(vcpu, rrs[0]));
292 }
293 }
295 IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
296 {
297 vcpu_set_metaphysical_mode(vcpu, TRUE);
298 return IA64_NO_FAULT;
299 }
301 IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
302 {
303 struct ia64_psr psr, imm, *ipsr;
304 REGS *regs = vcpu_regs(vcpu);
306 //PRIVOP_COUNT_ADDR(regs,_RSM);
307 // TODO: All of these bits need to be virtualized
308 // TODO: Only allowed for current vcpu
309 __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
310 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
311 imm = *(struct ia64_psr *)&imm24;
312 // interrupt flag
313 if (imm.i)
314 vcpu->vcpu_info->evtchn_upcall_mask = 1;
315 if (imm.ic)
316 PSCB(vcpu, interrupt_collection_enabled) = 0;
317 // interrupt collection flag
318 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
319 // just handle psr.up and psr.pp for now
320 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
321 IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
322 IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
323 return IA64_ILLOP_FAULT;
324 if (imm.dfh) {
325 ipsr->dfh = PSCB(vcpu, hpsr_dfh);
326 PSCB(vcpu, vpsr_dfh) = 0;
327 }
328 if (imm.dfl)
329 ipsr->dfl = 0;
330 if (imm.pp) {
331 ipsr->pp = 1;
332 psr.pp = 1; // priv perf ctrs always enabled
333 PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
334 }
335 if (imm.up) {
336 ipsr->up = 0;
337 psr.up = 0;
338 }
339 if (imm.sp) {
340 ipsr->sp = 0;
341 psr.sp = 0;
342 }
343 if (imm.be)
344 ipsr->be = 0;
345 if (imm.dt)
346 vcpu_set_metaphysical_mode(vcpu, TRUE);
347 if (imm.pk) {
348 ipsr->pk = 0;
349 vcpu_pkr_use_unset(vcpu);
350 }
351 __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
352 return IA64_NO_FAULT;
353 }
355 IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
356 {
357 vcpu_set_metaphysical_mode(vcpu, FALSE);
358 return IA64_NO_FAULT;
359 }
361 IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
362 {
363 vcpu->vcpu_info->evtchn_upcall_mask = 0;
364 PSCB(vcpu, interrupt_collection_enabled) = 1;
365 return IA64_NO_FAULT;
366 }
368 IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
369 {
370 struct ia64_psr psr, imm, *ipsr;
371 REGS *regs = vcpu_regs(vcpu);
372 u64 mask, enabling_interrupts = 0;
374 //PRIVOP_COUNT_ADDR(regs,_SSM);
375 // TODO: All of these bits need to be virtualized
376 __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
377 imm = *(struct ia64_psr *)&imm24;
378 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
379 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
380 mask =
381 IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
382 IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
383 IA64_PSR_PK;
384 if (imm24 & ~mask)
385 return IA64_ILLOP_FAULT;
386 if (imm.dfh) {
387 PSCB(vcpu, vpsr_dfh) = 1;
388 ipsr->dfh = 1;
389 }
390 if (imm.dfl)
391 ipsr->dfl = 1;
392 if (imm.pp) {
393 ipsr->pp = 1;
394 psr.pp = 1;
395 PSCB(vcpu, vpsr_pp) = 1;
396 }
397 if (imm.sp) {
398 ipsr->sp = 1;
399 psr.sp = 1;
400 }
401 if (imm.i) {
402 if (vcpu->vcpu_info->evtchn_upcall_mask) {
403 //printk("vcpu_set_psr_sm: psr.ic 0->1\n");
404 enabling_interrupts = 1;
405 }
406 vcpu->vcpu_info->evtchn_upcall_mask = 0;
407 }
408 if (imm.ic)
409 PSCB(vcpu, interrupt_collection_enabled) = 1;
410 // TODO: do this faster
411 if (imm.mfl) {
412 ipsr->mfl = 1;
413 psr.mfl = 1;
414 }
415 if (imm.mfh) {
416 ipsr->mfh = 1;
417 psr.mfh = 1;
418 }
419 if (imm.ac) {
420 ipsr->ac = 1;
421 psr.ac = 1;
422 }
423 if (imm.up) {
424 ipsr->up = 1;
425 psr.up = 1;
426 }
427 if (imm.be)
428 ipsr->be = 1;
429 if (imm.dt)
430 vcpu_set_metaphysical_mode(vcpu, FALSE);
431 if (imm.pk) {
432 vcpu_pkr_set_psr_handling(vcpu);
433 ipsr->pk = 1;
434 }
435 __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
436 if (enabling_interrupts &&
437 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
438 PSCB(vcpu, pending_interruption) = 1;
439 return IA64_NO_FAULT;
440 }
442 IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
443 {
444 struct ia64_psr newpsr, *ipsr;
445 REGS *regs = vcpu_regs(vcpu);
446 u64 enabling_interrupts = 0;
448 newpsr = *(struct ia64_psr *)&val;
449 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
450 // just handle psr.up and psr.pp for now
451 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
452 // return IA64_ILLOP_FAULT;
453 // however trying to set other bits can't be an error as it is in ssm
454 if (newpsr.dfh) {
455 ipsr->dfh = 1;
456 PSCB(vcpu, vpsr_dfh) = 1;
457 } else {
458 ipsr->dfh = PSCB(vcpu, hpsr_dfh);
459 PSCB(vcpu, vpsr_dfh) = 0;
460 }
461 if (newpsr.dfl)
462 ipsr->dfl = 1;
463 if (newpsr.pp) {
464 ipsr->pp = 1;
465 PSCB(vcpu, vpsr_pp) = 1;
466 } else {
467 ipsr->pp = 1;
468 PSCB(vcpu, vpsr_pp) = 0;
469 }
470 if (newpsr.up)
471 ipsr->up = 1;
472 if (newpsr.sp)
473 ipsr->sp = 1;
474 if (newpsr.i) {
475 if (vcpu->vcpu_info->evtchn_upcall_mask)
476 enabling_interrupts = 1;
477 vcpu->vcpu_info->evtchn_upcall_mask = 0;
478 }
479 if (newpsr.ic)
480 PSCB(vcpu, interrupt_collection_enabled) = 1;
481 if (newpsr.mfl)
482 ipsr->mfl = 1;
483 if (newpsr.mfh)
484 ipsr->mfh = 1;
485 if (newpsr.ac)
486 ipsr->ac = 1;
487 if (newpsr.up)
488 ipsr->up = 1;
489 if (newpsr.dt && newpsr.rt)
490 vcpu_set_metaphysical_mode(vcpu, FALSE);
491 else
492 vcpu_set_metaphysical_mode(vcpu, TRUE);
493 if (newpsr.be)
494 ipsr->be = 1;
495 if (newpsr.pk) {
496 vcpu_pkr_set_psr_handling(vcpu);
497 ipsr->pk = 1;
498 } else
499 vcpu_pkr_use_unset(vcpu);
500 if (enabling_interrupts &&
501 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
502 PSCB(vcpu, pending_interruption) = 1;
503 return IA64_NO_FAULT;
504 }
506 IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val)
507 {
508 IA64_PSR newpsr, vpsr;
509 REGS *regs = vcpu_regs(vcpu);
510 u64 enabling_interrupts = 0;
512 /* Copy non-virtualized bits. */
513 newpsr.val = val & IA64_PSR_NON_VIRT_BITS;
515 /* Bits forced to 1 (psr.si, psr.is and psr.mc are forced to 0) */
516 newpsr.val |= IA64_PSR_DI;
518 newpsr.val |= IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
519 IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_DI | IA64_PSR_PP;
521 vpsr.val = val;
523 if (val & IA64_PSR_DFH) {
524 newpsr.dfh = 1;
525 PSCB(vcpu, vpsr_dfh) = 1;
526 } else {
527 newpsr.dfh = PSCB(vcpu, hpsr_dfh);
528 PSCB(vcpu, vpsr_dfh) = 0;
529 }
531 PSCB(vcpu, vpsr_pp) = vpsr.pp;
533 if (vpsr.i) {
534 if (vcpu->vcpu_info->evtchn_upcall_mask)
535 enabling_interrupts = 1;
537 vcpu->vcpu_info->evtchn_upcall_mask = 0;
539 if (enabling_interrupts &&
540 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
541 PSCB(vcpu, pending_interruption) = 1;
542 } else
543 vcpu->vcpu_info->evtchn_upcall_mask = 1;
545 PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
546 vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));
548 newpsr.cpl |= max_t(u64, vpsr.cpl, CONFIG_CPL0_EMUL);
550 if (PSCB(vcpu, banknum) != vpsr.bn) {
551 if (vpsr.bn)
552 vcpu_bsw1(vcpu);
553 else
554 vcpu_bsw0(vcpu);
555 }
556 if (vpsr.pk) {
557 vcpu_pkr_set_psr_handling(vcpu);
558 newpsr.pk = 1;
559 } else
560 vcpu_pkr_use_unset(vcpu);
562 regs->cr_ipsr = newpsr.val;
564 return IA64_NO_FAULT;
565 }
567 u64 vcpu_get_psr(VCPU * vcpu)
568 {
569 REGS *regs = vcpu_regs(vcpu);
570 PSR newpsr;
571 PSR ipsr;
573 ipsr.i64 = regs->cr_ipsr;
575 /* Copy non-virtualized bits. */
576 newpsr.i64 = ipsr.i64 & IA64_PSR_NON_VIRT_BITS;
578 /* Bits forced to 1 (psr.si and psr.is are forced to 0) */
579 newpsr.i64 |= IA64_PSR_DI;
581 /* System mask. */
582 newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
583 newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
585 if (!PSCB(vcpu, metaphysical_mode))
586 newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
588 newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
589 newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
591 /* Fool cpl. */
592 if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
593 newpsr.ia64_psr.cpl = 0;
594 else
595 newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;
597 newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
599 return newpsr.i64;
600 }
602 IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval)
603 {
604 u64 psr = vcpu_get_psr(vcpu);
605 *pval = psr & (MASK(0, 32) | MASK(35, 2));
606 return IA64_NO_FAULT;
607 }
609 BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
610 {
611 return !!PSCB(vcpu, interrupt_collection_enabled);
612 }
614 BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
615 {
616 return !vcpu->vcpu_info->evtchn_upcall_mask;
617 }
619 /**************************************************************************
620 VCPU control register access routines
621 **************************************************************************/
623 IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
624 {
625 *pval = PSCB(vcpu, dcr);
626 return IA64_NO_FAULT;
627 }
629 IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
630 {
631 if (VMX_DOMAIN(vcpu))
632 *pval = PSCB(vcpu, iva) & ~0x7fffL;
633 else
634 *pval = PSCBX(vcpu, iva) & ~0x7fffL;
636 return IA64_NO_FAULT;
637 }
639 IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
640 {
641 *pval = PSCB(vcpu, pta);
642 return IA64_NO_FAULT;
643 }
645 IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
646 {
647 //REGS *regs = vcpu_regs(vcpu);
648 //*pval = regs->cr_ipsr;
649 *pval = PSCB(vcpu, ipsr);
650 return IA64_NO_FAULT;
651 }
653 IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
654 {
655 *pval = PSCB(vcpu, isr);
656 return IA64_NO_FAULT;
657 }
659 IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
660 {
661 //REGS *regs = vcpu_regs(vcpu);
662 //*pval = regs->cr_iip;
663 *pval = PSCB(vcpu, iip);
664 return IA64_NO_FAULT;
665 }
667 IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
668 {
669 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
670 *pval = PSCB(vcpu, ifa);
671 return IA64_NO_FAULT;
672 }
674 unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)
675 {
676 ia64_rr rr;
678 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
679 return rr.ps;
680 }
682 unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr)
683 {
684 ia64_rr rr;
686 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
687 return rr.rid;
688 }
690 unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa)
691 {
692 ia64_rr rr;
694 rr.rrval = 0;
695 rr.ps = vcpu_get_rr_ps(vcpu, ifa);
696 rr.rid = vcpu_get_rr_rid(vcpu, ifa);
697 return rr.rrval;
698 }
700 IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval)
701 {
702 u64 val = PSCB(vcpu, itir);
703 *pval = val;
704 return IA64_NO_FAULT;
705 }
707 IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval)
708 {
709 u64 val = PSCB(vcpu, iipa);
710 // SP entry code does not save iipa yet nor does it get
711 // properly delivered in the pscb
712 // printk("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
713 *pval = val;
714 return IA64_NO_FAULT;
715 }
717 IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
718 {
719 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
720 //*pval = PSCB(vcpu,regs).cr_ifs;
721 *pval = PSCB(vcpu, ifs);
722 return IA64_NO_FAULT;
723 }
725 IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval)
726 {
727 u64 val = PSCB(vcpu, iim);
728 *pval = val;
729 return IA64_NO_FAULT;
730 }
732 IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval)
733 {
734 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
735 *pval = PSCB(vcpu, iha);
736 return IA64_NO_FAULT;
737 }
739 IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
740 {
741 PSCB(vcpu, dcr) = val;
742 return IA64_NO_FAULT;
743 }
745 IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val)
746 {
747 if (VMX_DOMAIN(vcpu))
748 PSCB(vcpu, iva) = val & ~0x7fffL;
749 else
750 PSCBX(vcpu, iva) = val & ~0x7fffL;
752 return IA64_NO_FAULT;
753 }
755 IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
756 {
757 if (val & (0x3f << 9)) /* reserved fields */
758 return IA64_RSVDREG_FAULT;
759 if (val & 2) /* reserved fields */
760 return IA64_RSVDREG_FAULT;
761 PSCB(vcpu, pta) = val;
762 return IA64_NO_FAULT;
763 }
765 IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val)
766 {
767 PSCB(vcpu, ipsr) = val;
768 return IA64_NO_FAULT;
769 }
771 IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val)
772 {
773 PSCB(vcpu, isr) = val;
774 return IA64_NO_FAULT;
775 }
777 IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val)
778 {
779 PSCB(vcpu, iip) = val;
780 return IA64_NO_FAULT;
781 }
783 IA64FAULT vcpu_increment_iip(VCPU * vcpu)
784 {
785 REGS *regs = vcpu_regs(vcpu);
786 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
787 if (ipsr->ri == 2) {
788 ipsr->ri = 0;
789 regs->cr_iip += 16;
790 } else
791 ipsr->ri++;
792 return IA64_NO_FAULT;
793 }
795 IA64FAULT vcpu_decrement_iip(VCPU * vcpu)
796 {
797 REGS *regs = vcpu_regs(vcpu);
798 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
800 if (ipsr->ri == 0) {
801 ipsr->ri = 2;
802 regs->cr_iip -= 16;
803 } else
804 ipsr->ri--;
806 return IA64_NO_FAULT;
807 }
809 IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
810 {
811 PSCB(vcpu, ifa) = val;
812 return IA64_NO_FAULT;
813 }
815 IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val)
816 {
817 PSCB(vcpu, itir) = val;
818 return IA64_NO_FAULT;
819 }
821 IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val)
822 {
823 // SP entry code does not save iipa yet nor does it get
824 // properly delivered in the pscb
825 // printk("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
826 PSCB(vcpu, iipa) = val;
827 return IA64_NO_FAULT;
828 }
830 IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val)
831 {
832 //REGS *regs = vcpu_regs(vcpu);
833 PSCB(vcpu, ifs) = val;
834 return IA64_NO_FAULT;
835 }
837 IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val)
838 {
839 PSCB(vcpu, iim) = val;
840 return IA64_NO_FAULT;
841 }
843 IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val)
844 {
845 PSCB(vcpu, iha) = val;
846 return IA64_NO_FAULT;
847 }
849 /**************************************************************************
850 VCPU interrupt control register access routines
851 **************************************************************************/
853 void vcpu_pend_unspecified_interrupt(VCPU * vcpu)
854 {
855 PSCB(vcpu, pending_interruption) = 1;
856 }
858 void vcpu_pend_interrupt(VCPU * vcpu, u64 vector)
859 {
860 if (vector & ~0xff) {
861 printk("vcpu_pend_interrupt: bad vector\n");
862 return;
863 }
865 if (vcpu->arch.event_callback_ip) {
866 printk("Deprecated interface. Move to new event based "
867 "solution\n");
868 return;
869 }
871 if (VMX_DOMAIN(vcpu)) {
872 set_bit(vector, VCPU(vcpu, irr));
873 } else {
874 set_bit(vector, PSCBX(vcpu, irr));
875 PSCB(vcpu, pending_interruption) = 1;
876 }
877 }
879 #define IA64_TPR_MMI 0x10000
880 #define IA64_TPR_MIC 0x000f0
882 /* checks to see if a VCPU has any unmasked pending interrupts
883 * if so, returns the highest, else returns SPURIOUS_VECTOR */
884 /* NOTE: Since this gets called from vcpu_get_ivr() and the
885 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
886 * this routine also ignores pscb.interrupt_delivery_enabled
887 * and this must be checked independently; see vcpu_deliverable_interrupts() */
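/*
 * Worked example of the masking rules below (the vector numbers are made
 * up): with vector 0x31 pending in irr[0] and vector 0x38 still in-service
 * in insvc[0], the in-service bit is >= the pending bit within the same
 * word, so 0x31 stays masked and SPURIOUS_VECTOR is returned until the
 * guest EOIs 0x38.  Similarly, tpr.mmi masks every pending vector here,
 * and tpr.mic == 3 masks all vectors <= (3 << 4) + 15 = 63.
 */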
888 u64 vcpu_check_pending_interrupts(VCPU * vcpu)
889 {
890 u64 *p, *r, bits, bitnum, mask, i, vector;
892 if (vcpu->arch.event_callback_ip)
893 return SPURIOUS_VECTOR;
895 /* Always check for a pending event, since the guest may just ack the
896 * event injection without handling it. Later the guest may throw out
897 * the event itself.
898 */
899 check_start:
900 if (event_pending(vcpu) &&
901 !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
902 &PSCBX(vcpu, insvc[0])))
903 vcpu_pend_interrupt(vcpu,
904 vcpu->domain->shared_info->arch.
905 evtchn_vector);
907 p = &PSCBX(vcpu, irr[3]);
908 r = &PSCBX(vcpu, insvc[3]);
909 for (i = 3 ;; p--, r--, i--) {
910 bits = *p;
911 if (bits)
912 break; // got a potential interrupt
913 if (*r) {
914 // nothing in this word which is pending+inservice
915 // but there is one inservice which masks lower
916 return SPURIOUS_VECTOR;
917 }
918 if (i == 0) {
919 // checked all bits... nothing pending+inservice
920 return SPURIOUS_VECTOR;
921 }
922 }
923 // have a pending, deliverable interrupt... see if it is masked
924 bitnum = ia64_fls(bits);
925 //printk("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
926 vector = bitnum + (i * 64);
927 mask = 1L << bitnum;
928 /* sanity check for guest timer interrupt */
929 if (vector == (PSCB(vcpu, itv) & 0xff)) {
930 uint64_t now = ia64_get_itc();
931 if (now < PSCBX(vcpu, domain_itm)) {
932 // printk("Ooops, pending guest timer before its due\n");
933 PSCBX(vcpu, irr[i]) &= ~mask;
934 goto check_start;
935 }
936 }
937 //printk("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...\n",vector);
938 if (*r >= mask) {
939 // masked by equal inservice
940 //printk("but masked by equal inservice\n");
941 return SPURIOUS_VECTOR;
942 }
943 if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
944 // tpr.mmi is set
945 //printk("but masked by tpr.mmi\n");
946 return SPURIOUS_VECTOR;
947 }
948 if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
949 //tpr.mic masks class
950 //printk("but masked by tpr.mic\n");
951 return SPURIOUS_VECTOR;
952 }
953 //printk("returned to caller\n");
954 return vector;
955 }
957 u64 vcpu_deliverable_interrupts(VCPU * vcpu)
958 {
959 return (vcpu_get_psr_i(vcpu) &&
960 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
961 }
963 u64 vcpu_deliverable_timer(VCPU * vcpu)
964 {
965 return (vcpu_get_psr_i(vcpu) &&
966 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
967 }
969 IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval)
970 {
971 /* Use EID=0, ID=vcpu_id. */
972 *pval = vcpu->vcpu_id << 24;
973 return IA64_NO_FAULT;
974 }
976 IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval)
977 {
978 int i;
979 u64 vector, mask;
981 #define HEARTBEAT_FREQ 16 // period in seconds
982 #ifdef HEARTBEAT_FREQ
983 #define N_DOMS 16 // number of domains tracked
984 #if 0
985 static long count[N_DOMS] = { 0 };
986 #endif
987 static long nonclockcount[N_DOMS] = { 0 };
988 unsigned domid = vcpu->domain->domain_id;
989 #endif
990 #ifdef IRQ_DEBUG
991 static char firstivr = 1;
992 static char firsttime[256];
993 if (firstivr) {
994 int i;
995 for (i = 0; i < 256; i++)
996 firsttime[i] = 1;
997 firstivr = 0;
998 }
999 #endif
1001 vector = vcpu_check_pending_interrupts(vcpu);
1002 if (vector == SPURIOUS_VECTOR) {
1003 PSCB(vcpu, pending_interruption) = 0;
1004 *pval = vector;
1005 return IA64_NO_FAULT;
1006 }
1007 #ifdef HEARTBEAT_FREQ
1008 if (domid >= N_DOMS)
1009 domid = N_DOMS - 1;
1010 #if 0
1011 if (vector == (PSCB(vcpu, itv) & 0xff)) {
1012 if (!(++count[domid] & ((HEARTBEAT_FREQ * 1024) - 1))) {
1013 printk("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
1014 domid, count[domid], nonclockcount[domid]);
1015 //count[domid] = 0;
1016 //dump_runq();
1017 }
1018 }
1019 #endif
1020 else
1021 nonclockcount[domid]++;
1022 #endif
1023 // now have an unmasked, pending, deliverable vector!
1024 // getting ivr has "side effects"
1025 #ifdef IRQ_DEBUG
1026 if (firsttime[vector]) {
1027 printk("*** First get_ivr on vector=%lu,itc=%lx\n",
1028 vector, ia64_get_itc());
1029 firsttime[vector] = 0;
1030 }
1031 #endif
1032 /* if delivering a timer interrupt, remember domain_itm, which
1033 * needs to be done before clearing irr
1034 */
1035 if (vector == (PSCB(vcpu, itv) & 0xff)) {
1036 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
1037 }
1039 i = vector >> 6;
1040 mask = 1L << (vector & 0x3f);
1041 //printk("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
1042 PSCBX(vcpu, insvc[i]) |= mask;
1043 PSCBX(vcpu, irr[i]) &= ~mask;
1044 //PSCB(vcpu,pending_interruption)--;
1045 *pval = vector;
1046 return IA64_NO_FAULT;
1047 }
1049 IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval)
1051 *pval = PSCB(vcpu, tpr);
1052 return IA64_NO_FAULT;
1055 IA64FAULT vcpu_get_eoi(VCPU * vcpu, u64 * pval)
1057 *pval = 0L; // reads of eoi always return 0
1058 return IA64_NO_FAULT;
1061 IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval)
1063 *pval = PSCBX(vcpu, irr[0]);
1064 return IA64_NO_FAULT;
1067 IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval)
1069 *pval = PSCBX(vcpu, irr[1]);
1070 return IA64_NO_FAULT;
1073 IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval)
1075 *pval = PSCBX(vcpu, irr[2]);
1076 return IA64_NO_FAULT;
1079 IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval)
1081 *pval = PSCBX(vcpu, irr[3]);
1082 return IA64_NO_FAULT;
1085 IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval)
1087 *pval = PSCB(vcpu, itv);
1088 return IA64_NO_FAULT;
1091 IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval)
1093 *pval = PSCB(vcpu, pmv);
1094 return IA64_NO_FAULT;
1097 IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
1099 *pval = PSCB(vcpu, cmcv);
1100 return IA64_NO_FAULT;
1103 IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
1105 // fix this when setting values other than m-bit is supported
1106 gdprintk(XENLOG_DEBUG,
1107 "vcpu_get_lrr0: Unmasked interrupts unsupported\n");
1108 *pval = (1L << 16);
1109 return IA64_NO_FAULT;
1112 IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
1114 // fix this when setting values other than m-bit is supported
1115 gdprintk(XENLOG_DEBUG,
1116 "vcpu_get_lrr1: Unmasked interrupts unsupported\n");
1117 *pval = (1L << 16);
1118 return IA64_NO_FAULT;
1121 IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val)
1123 printk("vcpu_set_lid: Setting cr.lid is unsupported\n");
1124 return IA64_ILLOP_FAULT;
1127 IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val)
1129 if (val & 0xff00)
1130 return IA64_RSVDREG_FAULT;
1131 PSCB(vcpu, tpr) = val;
1132 /* This can unmask interrupts. */
1133 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
1134 PSCB(vcpu, pending_interruption) = 1;
1135 return IA64_NO_FAULT;
1138 IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val)
1139 {
1140 u64 *p, bits, vec, bitnum;
1141 int i;
1143 p = &PSCBX(vcpu, insvc[3]);
1144 for (i = 3; (i >= 0) && !(bits = *p); i--, p--)
1145 ;
1146 if (i < 0) {
1147 printk("Trying to EOI interrupt when none are in-service.\n");
1148 return IA64_NO_FAULT;
1149 }
1150 bitnum = ia64_fls(bits);
1151 vec = bitnum + (i * 64);
1152 /* clear the correct bit */
1153 bits &= ~(1L << bitnum);
1154 *p = bits;
1155 /* clearing an eoi bit may unmask another pending interrupt... */
1156 if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
1157 // worry about this later... Linux only calls eoi
1158 // with interrupts disabled
1159 printk("Trying to EOI interrupt with interrupts enabled\n");
1160 }
1161 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
1162 PSCB(vcpu, pending_interruption) = 1;
1163 //printk("YYYYY vcpu_set_eoi: Successful\n");
1164 return IA64_NO_FAULT;
1165 }
1167 IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val)
1169 if (!(val & (1L << 16))) {
1170 printk("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
1171 return IA64_ILLOP_FAULT;
1173 // no place to save this state but nothing to do anyway
1174 return IA64_NO_FAULT;
1177 IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val)
1179 if (!(val & (1L << 16))) {
1180 printk("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
1181 return IA64_ILLOP_FAULT;
1183 // no place to save this state but nothing to do anyway
1184 return IA64_NO_FAULT;
1187 IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val)
1189 /* Check reserved fields. */
1190 if (val & 0xef00)
1191 return IA64_ILLOP_FAULT;
1192 PSCB(vcpu, itv) = val;
1193 if (val & 0x10000) {
1194 /* Disable itm. */
1195 PSCBX(vcpu, domain_itm) = 0;
1196 } else
1197 vcpu_set_next_timer(vcpu);
1198 return IA64_NO_FAULT;
1201 IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val)
1203 if (val & 0xef00) /* reserved fields */
1204 return IA64_RSVDREG_FAULT;
1205 PSCB(vcpu, pmv) = val;
1206 return IA64_NO_FAULT;
1209 IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val)
1211 if (val & 0xef00) /* reserved fields */
1212 return IA64_RSVDREG_FAULT;
1213 PSCB(vcpu, cmcv) = val;
1214 return IA64_NO_FAULT;
1217 /**************************************************************************
1218 VCPU temporary register access routines
1219 **************************************************************************/
1220 u64 vcpu_get_tmp(VCPU * vcpu, u64 index)
1222 if (index > 7)
1223 return 0;
1224 return PSCB(vcpu, tmp[index]);
1227 void vcpu_set_tmp(VCPU * vcpu, u64 index, u64 val)
1229 if (index <= 7)
1230 PSCB(vcpu, tmp[index]) = val;
1233 /**************************************************************************
1234 Interval timer routines
1235 **************************************************************************/
1237 BOOLEAN vcpu_timer_disabled(VCPU * vcpu)
1239 u64 itv = PSCB(vcpu, itv);
1240 return (!itv || !!(itv & 0x10000));
1243 BOOLEAN vcpu_timer_inservice(VCPU * vcpu)
1245 u64 itv = PSCB(vcpu, itv);
1246 return test_bit(itv, PSCBX(vcpu, insvc));
1249 BOOLEAN vcpu_timer_expired(VCPU * vcpu)
1251 unsigned long domain_itm = PSCBX(vcpu, domain_itm);
1252 unsigned long now = ia64_get_itc();
1254 if (!domain_itm)
1255 return FALSE;
1256 if (now < domain_itm)
1257 return FALSE;
1258 if (vcpu_timer_disabled(vcpu))
1259 return FALSE;
1260 return TRUE;
1263 void vcpu_safe_set_itm(unsigned long val)
1265 unsigned long epsilon = 100;
1266 unsigned long flags;
1267 u64 now = ia64_get_itc();
1269 local_irq_save(flags);
1270 while (1) {
1271 //printk("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1272 ia64_set_itm(val);
1273 if (val > (now = ia64_get_itc()))
1274 break;
1275 val = now + epsilon;
1276 epsilon <<= 1;
1278 local_irq_restore(flags);
1281 void vcpu_set_next_timer(VCPU * vcpu)
1283 u64 d = PSCBX(vcpu, domain_itm);
1284 //u64 s = PSCBX(vcpu,xen_itm);
1285 u64 s = local_cpu_data->itm_next;
1286 u64 now = ia64_get_itc();
1288 /* gloss over the wraparound problem for now... we know it exists
1289 * but it doesn't matter right now */
1291 if (is_idle_domain(vcpu->domain)) {
1292 // printk("****** vcpu_set_next_timer called during idle!!\n");
1293 vcpu_safe_set_itm(s);
1294 return;
1296 //s = PSCBX(vcpu,xen_itm);
1297 if (d && (d > now) && (d < s)) {
1298 vcpu_safe_set_itm(d);
1299 //using_domain_as_itm++;
1300 } else {
1301 vcpu_safe_set_itm(s);
1302 //using_xen_as_itm++;
1306 IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val)
1308 //UINT now = ia64_get_itc();
1310 //if (val < now) val = now + 1000;
1311 //printk("*** vcpu_set_itm: called with %lx\n",val);
1312 PSCBX(vcpu, domain_itm) = val;
1313 vcpu_set_next_timer(vcpu);
1314 return IA64_NO_FAULT;
1317 IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
1319 #define DISALLOW_SETTING_ITC_FOR_NOW
1320 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1321 static int did_print;
1322 if (!did_print) {
1323 printk("vcpu_set_itc: Setting ar.itc is currently disabled "
1324 "(this message is only displayed once)\n");
1325 did_print = 1;
1327 #else
1328 u64 oldnow = ia64_get_itc();
1329 u64 olditm = PSCBX(vcpu, domain_itm);
1330 unsigned long d = olditm - oldnow;
1331 unsigned long x = local_cpu_data->itm_next - oldnow;
1333 u64 newnow = val, min_delta;
1335 local_irq_disable();
1336 if (olditm) {
1337 printk("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val,
1338 newnow + d);
1339 PSCBX(vcpu, domain_itm) = newnow + d;
1341 local_cpu_data->itm_next = newnow + x;
1342 d = PSCBX(vcpu, domain_itm);
1343 x = local_cpu_data->itm_next;
1345 ia64_set_itc(newnow);
1346 if (d && (d > newnow) && (d < x)) {
1347 vcpu_safe_set_itm(d);
1348 //using_domain_as_itm++;
1349 } else {
1350 vcpu_safe_set_itm(x);
1351 //using_xen_as_itm++;
1353 local_irq_enable();
1354 #endif
1355 return IA64_NO_FAULT;
1358 IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval)
1360 //FIXME: Implement this
1361 printk("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1362 return IA64_NO_FAULT;
1363 //return IA64_ILLOP_FAULT;
1366 IA64FAULT vcpu_get_itc(VCPU * vcpu, u64 * pval)
1368 //TODO: Implement this
1369 printk("vcpu_get_itc: Getting ar.itc is unsupported\n");
1370 return IA64_ILLOP_FAULT;
1373 void vcpu_pend_timer(VCPU * vcpu)
1375 u64 itv = PSCB(vcpu, itv) & 0xff;
1377 if (vcpu_timer_disabled(vcpu))
1378 return;
1379 //if (vcpu_timer_inservice(vcpu)) return;
1380 if (PSCBX(vcpu, domain_itm_last) == PSCBX(vcpu, domain_itm)) {
1381 // already delivered an interrupt for this so
1382 // don't deliver another
1383 return;
1385 if (vcpu->arch.event_callback_ip) {
1386 /* A small window may occur when injecting vIRQ while related
1387 * handler has not been registered. Don't fire in such case.
1388 */
1389 if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
1390 send_guest_vcpu_virq(vcpu, VIRQ_ITC);
1391 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
1393 } else
1394 vcpu_pend_interrupt(vcpu, itv);
1397 // returns true if ready to deliver a timer interrupt too early
1398 u64 vcpu_timer_pending_early(VCPU * vcpu)
1400 u64 now = ia64_get_itc();
1401 u64 itm = PSCBX(vcpu, domain_itm);
1403 if (vcpu_timer_disabled(vcpu))
1404 return 0;
1405 if (!itm)
1406 return 0;
1407 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1410 /**************************************************************************
1411 Privileged operation emulation routines
1412 **************************************************************************/
1414 static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
1416 PSCB(vcpu, ifa) = ifa;
1417 PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
1418 vcpu_thash(current, ifa, &PSCB(current, iha));
1421 IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
1423 vcpu_force_tlb_miss(vcpu, ifa);
1424 return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
1425 IA64_ALT_INST_TLB_VECTOR;
1428 IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
1430 vcpu_force_tlb_miss(vcpu, ifa);
1431 return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
1432 IA64_ALT_DATA_TLB_VECTOR;
1435 IA64FAULT vcpu_rfi(VCPU * vcpu)
1437 u64 ifs;
1438 REGS *regs = vcpu_regs(vcpu);
1440 vcpu_set_psr(vcpu, PSCB(vcpu, ipsr));
1442 ifs = PSCB(vcpu, ifs);
1443 if (ifs & 0x8000000000000000UL)
1444 regs->cr_ifs = ifs;
1446 regs->cr_iip = PSCB(vcpu, iip);
1448 return IA64_NO_FAULT;
1451 IA64FAULT vcpu_cover(VCPU * vcpu)
1453 // TODO: Only allowed for current vcpu
1454 REGS *regs = vcpu_regs(vcpu);
1456 if (!PSCB(vcpu, interrupt_collection_enabled)) {
1457 PSCB(vcpu, ifs) = regs->cr_ifs;
1459 regs->cr_ifs = 0;
1460 return IA64_NO_FAULT;
1463 IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
1464 {
1465 u64 pta = PSCB(vcpu, pta);
1466 u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1467 u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
1468 u64 Mask = (1L << pta_sz) - 1;
1469 u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1470 u64 compMask_60_15 = ~Mask_60_15;
1471 u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
1472 u64 VHPT_offset = (vadr >> rr_ps) << 3;
1473 u64 VHPT_addr1 = vadr & 0xe000000000000000L;
1474 u64 VHPT_addr2a =
1475 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1476 u64 VHPT_addr2b =
1477 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1478 u64 VHPT_addr3 = VHPT_offset & 0x7fff;
1479 u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1480 VHPT_addr3;
1482 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1483 *pval = VHPT_addr;
1484 return IA64_NO_FAULT;
1485 }
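/*
 * Worked example of the VHPT hash computed above (numbers chosen purely
 * for illustration): with pta.base = 0x8000000, pta.size = 18 (a 256KB
 * VHPT), rr.ps = 14 (16KB pages) and vadr = 0x400000000001c000,
 * VHPT_offset = (vadr >> 14) << 3 = 0x8000000000038; with a size-aligned
 * pta.base only the offset bits below the VHPT size survive, so the result
 * is region bits | pta.base | (VHPT_offset & 0x3ffff) = 0x4000000008000038.
 */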
1487 IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
1489 printk("vcpu_ttag: ttag instruction unsupported\n");
1490 return IA64_ILLOP_FAULT;
1493 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1495 /* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlaps. */
1496 static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
1498 return (b1 <= e2) && (e1 >= b2);
1501 /* Crash domain if [base, base + page_size] and Xen virtual space overlaps.
1502 Note: LSBs of base inside page_size are ignored. */
1503 static inline void
1504 check_xen_space_overlap(const char *func, u64 base, u64 page_size)
1506 /* Overlaps can occur only in region 7.
1507 (This is an optimization to bypass all the checks). */
1508 if (REGION_NUMBER(base) != 7)
1509 return;
1511 /* Mask LSBs of base. */
1512 base &= ~(page_size - 1);
1514 /* FIXME: ideally an MCA should be generated... */
1515 if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
1516 base, base + page_size)
1517 || range_overlap(current->domain->arch.shared_info_va,
1518 current->domain->arch.shared_info_va
1519 + XSI_SIZE + XMAPPEDREGS_SIZE,
1520 base, base + page_size))
1521 panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
1522 func, base);
1525 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
1526 static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
1527 u64 rid)
1529 return trp->rid == rid
1530 && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
1533 static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
1535 return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
1538 static inline int
1539 vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
1541 return trp->rid == rid
1542 && trp->pte.p
1543 && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
1547 static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
1548 BOOLEAN is_data)
1550 unsigned char *regions;
1551 TR_ENTRY *trp;
1552 int tr_max;
1553 int i;
1555 if (is_data) {
1556 // data
1557 regions = &vcpu->arch.dtr_regions;
1558 trp = vcpu->arch.dtrs;
1559 tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
1560 } else {
1561 // instruction
1562 regions = &vcpu->arch.itr_regions;
1563 trp = vcpu->arch.itrs;
1564 tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
1567 if (!vcpu_quick_region_check(*regions, va)) {
1568 return NULL;
1570 for (i = 0; i < tr_max; i++, trp++) {
1571 if (vcpu_match_tr_entry(trp, va, rid)) {
1572 return trp;
1575 return NULL;
1578 // return value
1579 // 0: failure
1580 // 1: success
1581 int
1582 vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
1583 IA64_BUNDLE * bundle)
1585 u64 gpip; // guest pseudo physical ip
1586 unsigned long vaddr;
1587 struct page_info *page;
1589 again:
1590 #if 0
1591 // Currently xen doesn't track psr.it bits.
1592 // it assumes always psr.it = 1.
1593 if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
1594 gpip = gip;
1595 } else
1596 #endif
1598 unsigned long region = REGION_NUMBER(gip);
1599 unsigned long rr = PSCB(vcpu, rrs)[region];
1600 unsigned long rid = rr & RR_RID_MASK;
1601 BOOLEAN swap_rr0;
1602 TR_ENTRY *trp;
1604 // vcpu->arch.{i, d}tlb are volatile,
1605 // copy its value to the variable, tr, before use.
1606 TR_ENTRY tr;
1608 trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
1609 if (trp != NULL) {
1610 tr = *trp;
1611 goto found;
1613 // When it failed to get a bundle, itlb miss is reflected.
1614 // Last itc.i value is cached to PSCBX(vcpu, itlb).
1615 tr = PSCBX(vcpu, itlb);
1616 if (vcpu_match_tr_entry(&tr, gip, rid)) {
1617 //dprintk(XENLOG_WARNING,
1618 // "%s gip 0x%lx gpip 0x%lx\n", __func__,
1619 // gip, gpip);
1620 goto found;
1622 trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
1623 if (trp != NULL) {
1624 tr = *trp;
1625 goto found;
1627 #if 0
1628 tr = PSCBX(vcpu, dtlb);
1629 if (vcpu_match_tr_entry(&tr, gip, rid)) {
1630 goto found;
1632 #endif
1634 // try to access gip with guest virtual address
1635 // This may cause tlb miss. see vcpu_translate(). Be careful!
1636 swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
1637 if (swap_rr0) {
1638 set_one_rr(0x0, PSCB(vcpu, rrs[0]));
1640 *bundle = __get_domain_bundle(gip);
1641 if (swap_rr0) {
1642 set_metaphysical_rr0();
1644 if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
1645 dprintk(XENLOG_INFO, "%s gip 0x%lx\n", __func__, gip);
1646 return 0;
1648 return 1;
1650 found:
1651 gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
1652 (gip & ((1 << tr.ps) - 1));
1655 vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
1656 page = virt_to_page(vaddr);
1657 if (get_page(page, vcpu->domain) == 0) {
1658 if (page_get_owner(page) != vcpu->domain) {
1659 // This page might be a page granted by another
1660 // domain.
1661 panic_domain(regs, "domain tries to execute foreign "
1662 "domain page which might be mapped by "
1663 "grant table.\n");
1665 goto again;
1667 *bundle = *((IA64_BUNDLE *) vaddr);
1668 put_page(page);
1669 return 1;
1672 IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
1673 u64 * pteval, u64 * itir, u64 * iha)
1675 unsigned long region = address >> 61;
1676 unsigned long pta, rid, rr, key = 0;
1677 union pte_flags pte;
1678 TR_ENTRY *trp;
1680 if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
1681 // dom0 may generate an uncacheable physical address (msb=1)
1682 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1683 // FIXME: This seems to happen even though it shouldn't. Need to track
1684 // this down, but since it has been apparently harmless, just flag it for now
1685 // panic_domain(vcpu_regs(vcpu),
1687 /*
1688 * The guest may execute itc.d and rfi with psr.dt=0.
1689 * When the VMM tries to fetch the opcode, a tlb miss may happen.
1690 * At this time PSCB(vcpu,metaphysical_mode)=1 and
1691 * region=5; the VMM needs to handle this tlb miss as if
1692 * PSCB(vcpu,metaphysical_mode)=0.
1693 */
1694 printk("vcpu_translate: bad physical address: 0x%lx "
1695 "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);
1697 } else {
1698 *pteval = (address & _PAGE_PPN_MASK) |
1699 __DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
1700 *itir = vcpu->arch.vhpt_pg_shift << 2;
1701 perfc_incr(phys_translate);
1702 return IA64_NO_FAULT;
1704 } else if (!region && warn_region0_address) {
1705 REGS *regs = vcpu_regs(vcpu);
1706 unsigned long viip = PSCB(vcpu, iip);
1707 unsigned long vipsr = PSCB(vcpu, ipsr);
1708 unsigned long iip = regs->cr_iip;
1709 unsigned long ipsr = regs->cr_ipsr;
1710 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
1711 "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
1712 address, viip, vipsr, iip, ipsr);
1715 rr = PSCB(vcpu, rrs)[region];
1716 rid = rr & RR_RID_MASK;
1717 if (is_data) {
1718 trp = vcpu_tr_lookup(vcpu, address, rid, 1);
1719 if (trp != NULL) {
1720 *pteval = trp->pte.val;
1721 *itir = trp->itir;
1722 perfc_incr(tr_translate);
1723 return IA64_NO_FAULT;
1726 // FIXME?: check itr's for data accesses too, else bad things happen?
1727 /* else */ {
1728 trp = vcpu_tr_lookup(vcpu, address, rid, 0);
1729 if (trp != NULL) {
1730 *pteval = trp->pte.val;
1731 *itir = trp->itir;
1732 perfc_incr(tr_translate);
1733 return IA64_NO_FAULT;
1737 /* check 1-entry TLB */
1738 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1739 trp = &vcpu->arch.dtlb;
1740 pte = trp->pte;
1741 if ( /* is_data && */ pte.p
1742 && vcpu_match_tr_entry_no_p(trp, address, rid)) {
1743 *pteval = pte.val;
1744 *itir = trp->itir;
1745 perfc_incr(dtlb_translate);
1746 return IA64_USE_TLB;
1749 /* check guest VHPT */
1750 pta = PSCB(vcpu, pta);
1752 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1753 // note: architecturally, iha is optionally set for alt faults but
1754 // xenlinux depends on it so should document it as part of PV interface
1755 vcpu_thash(vcpu, address, iha);
1756 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
1757 REGS *regs = vcpu_regs(vcpu);
1758 struct opt_feature* optf = &(vcpu->domain->arch.opt_feature);
1760 /* Optimization for identity mapped region 7 OS (linux) */
1761 if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7 &&
1762 region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
1763 pte.val = address & _PAGE_PPN_MASK;
1764 pte.val = pte.val | optf->im_reg7.pgprot;
1765 key = optf->im_reg7.key;
1766 goto out;
1768 return is_data ? IA64_ALT_DATA_TLB_VECTOR :
1769 IA64_ALT_INST_TLB_VECTOR;
1772 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1773 /*
1774 * minimal support: vhpt walker is really dumb and won't find
1775 * anything
1776 */
1777 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1779 /* avoid recursively walking (short format) VHPT */
1780 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1781 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1783 if (!__access_ok(*iha)
1784 || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1785 // virtual VHPT walker "missed" in TLB
1786 return IA64_VHPT_FAULT;
1788 /*
1789 * Optimisation: this VHPT walker aborts on not-present pages
1790 * instead of inserting a not-present translation; this allows
1791 * vectoring directly to the miss handler.
1792 */
1793 if (!pte.p)
1794 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1796 /* found mapping in guest VHPT! */
1797 out:
1798 *itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
1799 *pteval = pte.val;
1800 perfc_incr(vhpt_translate);
1801 return IA64_NO_FAULT;
1804 IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
1806 u64 pteval, itir, mask, iha;
1807 IA64FAULT fault;
1809 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
1810 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
1811 mask = itir_mask(itir);
1812 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1813 return IA64_NO_FAULT;
1815 return vcpu_force_data_miss(vcpu, vadr);
1818 IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
1820 u64 pteval, itir, mask, iha;
1821 IA64FAULT fault;
1823 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
1824 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
1825 *key = itir & IA64_ITIR_KEY_MASK;
1826 else
1827 *key = 1;
1829 return IA64_NO_FAULT;
1832 /**************************************************************************
1833 VCPU debug breakpoint register access routines
1834 **************************************************************************/
1836 IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
1838 if (reg >= IA64_NUM_DBG_REGS)
1839 return IA64_RSVDREG_FAULT;
1840 if ((reg & 1) == 0) {
1841 /* Validate address. */
1842 if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
1843 return IA64_ILLOP_FAULT;
1844 } else {
1845 if (!VMX_DOMAIN(vcpu)) {
1846 /* Mask PL0. */
1847 val &= ~(1UL << 56);
1850 if (val != 0)
1851 vcpu->arch.dbg_used |= (1 << reg);
1852 else
1853 vcpu->arch.dbg_used &= ~(1 << reg);
1854 vcpu->arch.dbr[reg] = val;
1855 if (vcpu == current)
1856 ia64_set_dbr(reg, val);
1857 return IA64_NO_FAULT;
1860 IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
1862 if (reg >= IA64_NUM_DBG_REGS)
1863 return IA64_RSVDREG_FAULT;
1864 if ((reg & 1) == 0) {
1865 /* Validate address. */
1866 if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
1867 return IA64_ILLOP_FAULT;
1868 } else {
1869 if (!VMX_DOMAIN(vcpu)) {
1870 /* Mask PL0. */
1871 val &= ~(1UL << 56);
1874 if (val != 0)
1875 vcpu->arch.dbg_used |= (1 << (reg + IA64_NUM_DBG_REGS));
1876 else
1877 vcpu->arch.dbg_used &= ~(1 << (reg + IA64_NUM_DBG_REGS));
1878 vcpu->arch.ibr[reg] = val;
1879 if (vcpu == current)
1880 ia64_set_ibr(reg, val);
1881 return IA64_NO_FAULT;
1884 IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
1886 if (reg >= IA64_NUM_DBG_REGS)
1887 return IA64_RSVDREG_FAULT;
1888 *pval = vcpu->arch.dbr[reg];
1889 return IA64_NO_FAULT;
1892 IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
1894 if (reg >= IA64_NUM_DBG_REGS)
1895 return IA64_RSVDREG_FAULT;
1896 *pval = vcpu->arch.ibr[reg];
1897 return IA64_NO_FAULT;
1900 /**************************************************************************
1901 VCPU performance monitor register access routines
1902 **************************************************************************/
1904 IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
1906 // TODO: Should set Logical CPU state, not just physical
1907 // NOTE: Writes to unimplemented PMC registers are discarded
1908 #ifdef DEBUG_PFMON
1909 printk("vcpu_set_pmc(%x,%lx)\n", reg, val);
1910 #endif
1911 ia64_set_pmc(reg, val);
1912 return IA64_NO_FAULT;
1915 IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
1917 // TODO: Should set Logical CPU state, not just physical
1918 // NOTE: Writes to unimplemented PMD registers are discarded
1919 #ifdef DEBUG_PFMON
1920 printk("vcpu_set_pmd(%x,%lx)\n", reg, val);
1921 #endif
1922 ia64_set_pmd(reg, val);
1923 return IA64_NO_FAULT;
1926 IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
1928 // NOTE: Reads from unimplemented PMC registers return zero
1929 u64 val = (u64) ia64_get_pmc(reg);
1930 #ifdef DEBUG_PFMON
1931 printk("%lx=vcpu_get_pmc(%x)\n", val, reg);
1932 #endif
1933 *pval = val;
1934 return IA64_NO_FAULT;
1937 IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
1939 // NOTE: Reads from unimplemented PMD registers return zero
1940 u64 val = (u64) ia64_get_pmd(reg);
1941 #ifdef DEBUG_PFMON
1942 printk("%lx=vcpu_get_pmd(%x)\n", val, reg);
1943 #endif
1944 *pval = val;
1945 return IA64_NO_FAULT;
1948 /**************************************************************************
1949 VCPU banked general register access routines
1950 **************************************************************************/
1951 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1952 do{ \
1953 __asm__ __volatile__ ( \
1954 ";;extr.u %0 = %3,%6,16;;\n" \
1955 "dep %1 = %0, %1, 0, 16;;\n" \
1956 "st8 [%4] = %1\n" \
1957 "extr.u %0 = %2, 16, 16;;\n" \
1958 "dep %3 = %0, %3, %6, 16;;\n" \
1959 "st8 [%5] = %3\n" \
1960 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1961 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1962 }while(0)
1964 IA64FAULT vcpu_bsw0(VCPU * vcpu)
1966 // TODO: Only allowed for current vcpu
1967 REGS *regs = vcpu_regs(vcpu);
1968 unsigned long *r = &regs->r16;
1969 unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
1970 unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
1971 unsigned long *runat = &regs->eml_unat;
1972 unsigned long *b0unat = &PSCB(vcpu, vbnat);
1973 unsigned long *b1unat = &PSCB(vcpu, vnat);
1975 unsigned long i;
1977 if (VMX_DOMAIN(vcpu)) {
1978 if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1979 for (i = 0; i < 16; i++) {
1980 *b1++ = *r;
1981 *r++ = *b0++;
1983 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1984 IA64_PT_REGS_R16_SLOT);
1985 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1987 } else {
1988 if (PSCB(vcpu, banknum)) {
1989 for (i = 0; i < 16; i++) {
1990 *b1++ = *r;
1991 *r++ = *b0++;
1993 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1994 IA64_PT_REGS_R16_SLOT);
1995 PSCB(vcpu, banknum) = 0;
1998 return IA64_NO_FAULT;
2001 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT) \
2002 do { \
2003 __asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n" \
2004 "dep %1 = %0, %1, 16, 16;;\n" \
2005 "st8 [%4] = %1\n" \
2006 "extr.u %0 = %2, 0, 16;;\n" \
2007 "dep %3 = %0, %3, %6, 16;;\n" \
2008 "st8 [%5] = %3\n" \
2009 ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
2010 "r"(*runat), "r"(b0unat), "r"(runat), \
2011 "i"(IA64_PT_REGS_R16_SLOT): "memory"); \
2012 } while(0)
2014 IA64FAULT vcpu_bsw1(VCPU * vcpu)
2016 // TODO: Only allowed for current vcpu
2017 REGS *regs = vcpu_regs(vcpu);
2018 unsigned long *r = &regs->r16;
2019 unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
2020 unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
2021 unsigned long *runat = &regs->eml_unat;
2022 unsigned long *b0unat = &PSCB(vcpu, vbnat);
2023 unsigned long *b1unat = &PSCB(vcpu, vnat);
2025 unsigned long i;
2027 if (VMX_DOMAIN(vcpu)) {
2028 if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
2029 for (i = 0; i < 16; i++) {
2030 *b0++ = *r;
2031 *r++ = *b1++;
2033 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
2034 IA64_PT_REGS_R16_SLOT);
2035 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
2037 } else {
2038 if (!PSCB(vcpu, banknum)) {
2039 for (i = 0; i < 16; i++) {
2040 *b0++ = *r;
2041 *r++ = *b1++;
2043 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
2044 IA64_PT_REGS_R16_SLOT);
2045 PSCB(vcpu, banknum) = 1;
2048 return IA64_NO_FAULT;
2051 /**************************************************************************
2052 VCPU cpuid access routines
2053 **************************************************************************/
2055 IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
2057 // FIXME: This could get called as a result of a rsvd-reg fault
2058 // if reg > 3
2059 switch (reg) {
2060 case 0:
2061 memcpy(pval, "Xen/ia64", 8);
2062 break;
2063 case 1:
2064 *pval = 0;
2065 break;
2066 case 2:
2067 *pval = 0;
2068 break;
2069 case 3:
2070 *pval = ia64_get_cpuid(3);
2071 break;
2072 case 4:
2073 *pval = ia64_get_cpuid(4);
2074 break;
2075 default:
2076 if (reg > (ia64_get_cpuid(3) & 0xff))
2077 return IA64_RSVDREG_FAULT;
2078 *pval = ia64_get_cpuid(reg);
2079 break;
2080 }
2081 return IA64_NO_FAULT;
2082 }
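#if 0
/*
 * Compiled-out usage sketch, for illustration only: reassembling the
 * virtualized vendor string a guest sees through cpuid[0] and cpuid[1].
 * As the switch above shows, cpuid[0] reads back "Xen/ia64" and cpuid[1]
 * reads back zero, so the 16-byte vendor field is "Xen/ia64" padded with
 * NULs.  The helper name is illustrative, not part of Xen.
 */
static void cpuid_vendor_sketch(VCPU * vcpu, char vendor[17])
{
	u64 lo, hi;

	vcpu_get_cpuid(vcpu, 0, &lo);	/* "Xen/ia64" */
	vcpu_get_cpuid(vcpu, 1, &hi);	/* 0 */
	memcpy(vendor, &lo, 8);
	memcpy(vendor + 8, &hi, 8);
	vendor[16] = '\0';
}
#endif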
2084 /**************************************************************************
2085 VCPU region register access routines
2086 **************************************************************************/
2088 unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
2089 {
2090 ia64_rr rr;
2092 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
2093 return rr.ve;
2094 }
2096 IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
2097 {
2098 PSCB(vcpu, rrs)[reg >> 61] = val;
2099 if (vcpu == current)
2100 set_one_rr(reg, val);
2101 return IA64_NO_FAULT;
2102 }
2104 IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
2105 {
2106 if (VMX_DOMAIN(vcpu))
2107 *pval = VMX(vcpu, vrr[reg >> 61]);
2108 else
2109 *pval = PSCB(vcpu, rrs)[reg >> 61];
2111 return IA64_NO_FAULT;
2112 }
2114 IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
2115 u64 val3, u64 val4)
2116 {
2117 u64 reg0 = 0x0000000000000000UL;
2118 u64 reg1 = 0x2000000000000000UL;
2119 u64 reg2 = 0x4000000000000000UL;
2120 u64 reg3 = 0x6000000000000000UL;
2121 u64 reg4 = 0x8000000000000000UL;
2123 PSCB(vcpu, rrs)[reg0 >> 61] = val0;
2124 PSCB(vcpu, rrs)[reg1 >> 61] = val1;
2125 PSCB(vcpu, rrs)[reg2 >> 61] = val2;
2126 PSCB(vcpu, rrs)[reg3 >> 61] = val3;
2127 PSCB(vcpu, rrs)[reg4 >> 61] = val4;
2128 if (vcpu == current) {
2129 set_one_rr(reg0, val0);
2130 set_one_rr(reg1, val1);
2131 set_one_rr(reg2, val2);
2132 set_one_rr(reg3, val3);
2133 set_one_rr(reg4, val4);
2134 }
2135 return IA64_NO_FAULT;
2136 }
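#if 0
/*
 * Compiled-out sketch, for illustration only: the region register that
 * governs a virtual address is selected by its top three bits, which is
 * exactly the (vadr >> 61) / (reg >> 61) indexing used by the accessors
 * above.  The helper name is illustrative, not part of Xen.
 */
static u64 rid_for_vadr_sketch(VCPU * vcpu, u64 vadr)
{
	u64 rrval;

	vcpu_get_rr(vcpu, vadr, &rrval);	/* only vadr >> 61 matters */
	return rrval & RR_RID_MASK;		/* region id field of the rr */
}
#endif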
2138 /**************************************************************************
2139 VCPU protection key register access routines
2140 **************************************************************************/
2142 IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
2143 {
2144 if (reg >= XEN_IA64_NPKRS)
2145 return IA64_RSVDREG_FAULT; /* register index too large */
2147 *pval = (u64) PSCBX(vcpu, pkrs[reg]);
2148 return IA64_NO_FAULT;
2149 }
2151 IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
2152 {
2153 ia64_pkr_t pkr_new;
2155 if (reg >= XEN_IA64_NPKRS)
2156 return IA64_RSVDREG_FAULT; /* index too large */
2158 pkr_new.val = val;
2159 if (pkr_new.reserved1)
2160 return IA64_RSVDREG_FAULT; /* reserved field */
2162 if (pkr_new.reserved2)
2163 return IA64_RSVDREG_FAULT; /* reserved field */
2165 PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
2166 ia64_set_pkr(reg, pkr_new.val);
2168 return IA64_NO_FAULT;
2169 }
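#if 0
/*
 * Compiled-out usage sketch, for illustration only: a round trip through
 * the protection key accessors above.  An all-zero value trivially has
 * both reserved fields clear, so it is accepted and written to the shadow
 * copy and the hardware pkr; any value with a reserved field set is
 * rejected with IA64_RSVDREG_FAULT first.  The helper name is illustrative.
 */
static void pkr_roundtrip_sketch(VCPU * vcpu)
{
	u64 readback;

	if (vcpu_set_pkr(vcpu, 0, 0) == IA64_NO_FAULT)
		vcpu_get_pkr(vcpu, 0, &readback);
}
#endif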
2171 /**************************************************************************
2172 VCPU translation register access routines
2173 **************************************************************************/
2175 static void
2176 vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
2177 u64 itir, u64 ifa, u64 rid)
2178 {
2179 u64 ps;
2180 union pte_flags new_pte;
2182 trp->itir = itir;
2183 trp->rid = rid;
2184 ps = trp->ps;
2185 new_pte.val = pte;
2186 if (new_pte.pl < CONFIG_CPL0_EMUL)
2187 new_pte.pl = CONFIG_CPL0_EMUL;
2188 trp->vadr = ifa & ~0xfff;
2189 if (ps > 12) { // "ignore" relevant low-order bits
2190 new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
2191 trp->vadr &= ~((1UL << ps) - 1);
2192 }
2194 /* Atomic write. */
2195 trp->pte.val = new_pte.val;
2196 }
2198 static inline void
2199 vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
2200 {
2201 vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
2202 VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
2203 }
2205 IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
2206 u64 itir, u64 ifa)
2207 {
2208 TR_ENTRY *trp;
2210 if (slot >= NDTRS)
2211 return IA64_RSVDREG_FAULT;
2213 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2215 trp = &PSCBX(vcpu, dtrs[slot]);
2216 //printk("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
2217 vcpu_set_tr_entry(trp, pte, itir, ifa);
2218 vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);
2220 /*
2221 * FIXME: According to the spec, the VHPT should be purged, but this
2222 * incurs considerable performance loss. Since it is safe for
2223 * Linux not to purge the VHPT, VHPT purging is disabled until a
2224 * feasible way is found.
2225 *
2226 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
2227 */
2229 return IA64_NO_FAULT;
2230 }
2232 IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
2233 u64 itir, u64 ifa)
2234 {
2235 TR_ENTRY *trp;
2237 if (slot >= NITRS)
2238 return IA64_RSVDREG_FAULT;
2240 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2242 trp = &PSCBX(vcpu, itrs[slot]);
2243 //printk("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
2244 vcpu_set_tr_entry(trp, pte, itir, ifa);
2245 vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);
2247 /*
2248 * FIXME: According to the spec, the VHPT should be purged, but this
2249 * incurs considerable performance loss. Since it is safe for
2250 * Linux not to purge the VHPT, VHPT purging is disabled until a
2251 * feasible way is found.
2252 *
2253 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
2254 */
2256 return IA64_NO_FAULT;
2257 }
2259 IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
2260 u64 itir, u64 ifa, u64 rid)
2261 {
2262 TR_ENTRY *trp;
2264 if (slot >= NITRS)
2265 return IA64_RSVDREG_FAULT;
2266 trp = &PSCBX(vcpu, itrs[slot]);
2267 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
2269 /* Recompute the itr_region. */
2270 vcpu->arch.itr_regions = 0;
2271 for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++)
2272 if (trp->pte.p)
2273 vcpu_quick_region_set(vcpu->arch.itr_regions,
2274 trp->vadr);
2275 return IA64_NO_FAULT;
2276 }
2278 IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
2279 u64 itir, u64 ifa, u64 rid)
2280 {
2281 TR_ENTRY *trp;
2283 if (slot >= NDTRS)
2284 return IA64_RSVDREG_FAULT;
2285 trp = &PSCBX(vcpu, dtrs[slot]);
2286 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
2288 /* Recompute the dtr_region. */
2289 vcpu->arch.dtr_regions = 0;
2290 for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++)
2291 if (trp->pte.p)
2292 vcpu_quick_region_set(vcpu->arch.dtr_regions,
2293 trp->vadr);
2294 return IA64_NO_FAULT;
2295 }
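#if 0
/*
 * Compiled-out sketch, for illustration only, of the bookkeeping done by
 * vcpu_set_itr()/vcpu_set_dtr() above: itr_regions/dtr_regions act as a
 * small bitmask with one bit per virtual region (vadr >> 61), so later
 * purges can cheaply skip regions that hold no present translation
 * registers.  vcpu_quick_region_set() is assumed to set exactly that bit,
 * as its uses here suggest; the helper name below is illustrative.
 */
static unsigned long tr_region_mask_sketch(const TR_ENTRY * trs, int ntrs)
{
	unsigned long mask = 0;
	int i;

	for (i = 0; i < ntrs; i++)
		if (trs[i].pte.p)
			mask |= 1UL << (trs[i].vadr >> 61);
	return mask;
}
#endif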
2297 /**************************************************************************
2298 VCPU translation cache access routines
2299 **************************************************************************/
2301 static void
2302 vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
2303 {
2304 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
2305 printk("vhpt rebuild: using page_shift %d\n", (int)ps);
2306 vcpu->arch.vhpt_pg_shift = ps;
2307 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2308 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2309 local_vhpt_flush();
2310 load_region_regs(vcpu);
2311 #else
2312 panic_domain(NULL, "domain trying to use smaller page size!\n");
2313 #endif
2314 }
2316 void
2317 vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
2318 u64 mp_pte, u64 itir, struct p2m_entry *entry)
2319 {
2320 ia64_itir_t _itir = {.itir = itir};
2321 unsigned long psr;
2322 unsigned long ps = (vcpu->domain == dom0) ? _itir.ps :
2323 vcpu->arch.vhpt_pg_shift;
2325 check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
2327 // FIXME, must be inlined or potential for nested fault here!
2328 if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
2329 panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
2330 "smaller page size!\n");
2332 BUG_ON(_itir.ps > vcpu->arch.vhpt_pg_shift);
2333 vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
2334 psr = ia64_clear_ic();
2335 pte &= ~(_PAGE_RV2 | _PAGE_RV1); // Mask out the reserved bits.
2336 // FIXME: look for bigger mappings
2337 ia64_itc(IorD, vaddr, pte, IA64_ITIR_PS_KEY(ps, _itir.key));
2338 ia64_set_psr(psr);
2339 // ia64_srlz_i(); // no srlz req'd, will rfi later
2340 if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
2341 // FIXME: this is dangerous... vhpt_flush_address ensures these
2342 // addresses never get flushed. More work needed if this
2343 // ever happens.
2344 //printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
2345 if (_itir.ps > vcpu->arch.vhpt_pg_shift)
2346 vhpt_multiple_insert(vaddr, pte, _itir.itir);
2347 else
2348 vhpt_insert(vaddr, pte, _itir.itir);
2349 }
2350 // even if domain pagesize is larger than PAGE_SIZE, just put
2351 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
2352 else {
2353 _itir.ps = vcpu->arch.vhpt_pg_shift;
2354 vhpt_insert(vaddr, pte, _itir.itir);
2355 }
2356 }
2358 IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
2359 {
2360 unsigned long pteval;
2361 BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
2362 struct p2m_entry entry;
2363 ia64_itir_t _itir = {.itir = itir};
2365 if (_itir.ps < vcpu->arch.vhpt_pg_shift)
2366 vcpu_rebuild_vhpt(vcpu, _itir.ps);
2368 again:
2369 //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
2370 pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
2371 if (!pteval)
2372 return IA64_ILLOP_FAULT;
2373 if (swap_rr0)
2374 set_one_rr(0x0, PSCB(vcpu, rrs[0]));
2375 vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
2376 if (swap_rr0)
2377 set_metaphysical_rr0();
2378 if (p2m_entry_retry(&entry)) {
2379 vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
2380 goto again;
2381 }
2382 vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
2383 return IA64_NO_FAULT;
2384 }
2386 IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
2387 {
2388 unsigned long pteval;
2389 BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
2390 struct p2m_entry entry;
2391 ia64_itir_t _itir = {.itir = itir};
2393 if (_itir.ps < vcpu->arch.vhpt_pg_shift)
2394 vcpu_rebuild_vhpt(vcpu, _itir.ps);
2396 again:
2397 //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
2398 pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
2399 if (!pteval)
2400 return IA64_ILLOP_FAULT;
2401 if (swap_rr0)
2402 set_one_rr(0x0, PSCB(vcpu, rrs[0]));
2403 vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
2404 if (swap_rr0)
2405 set_metaphysical_rr0();
2406 if (p2m_entry_retry(&entry)) {
2407 vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
2408 goto again;
2409 }
2410 vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
2411 return IA64_NO_FAULT;
2412 }
2414 IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
2415 {
2416 BUG_ON(vcpu != current);
2418 check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);
2420 /* Purge TC */
2421 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2422 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2424 /* Purge all tlb and vhpt */
2425 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2427 return IA64_NO_FAULT;
2428 }
2430 // At privlvl=0, fc performs no access rights or protection key checks, while
2431 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
2432 // read but no protection key check. Thus in order to avoid an unexpected
2433 // access rights fault, we have to translate the virtual address to a
2434 // physical address (possibly via a metaphysical address) and do the fc
2435 // on the physical address, which is guaranteed to flush the same cache line.
2436 IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
2437 {
2438 // TODO: Only allowed for current vcpu
2439 u64 mpaddr, paddr;
2440 IA64FAULT fault;
2442 again:
2443 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
2444 if (fault == IA64_NO_FAULT) {
2445 struct p2m_entry entry;
2446 paddr = translate_domain_mpaddr(mpaddr, &entry);
2447 ia64_fc(__va(paddr));
2448 if (p2m_entry_retry(&entry))
2449 goto again;
2450 }
2451 return fault;
2452 }
2454 IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
2455 {
2456 // Note that this only needs to be called once, i.e. the
2457 // architected loop to purge the entire TLB should use
2458 // base = stride1 = stride2 = 0, count0 = count1 = 1
2460 vcpu_flush_vtlb_all(current);
2462 return IA64_NO_FAULT;
2463 }
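#if 0
/*
 * Compiled-out sketch, for illustration only, of the architected ptc.e
 * loop the comment above refers to: a guest walks the (base, count,
 * stride) tuple obtained from PAL_PTCE_INFO.  With count0 = count1 = 1,
 * as the comment above prescribes, the nested loop collapses to a single
 * vcpu_ptc_e() call, which flushes the entire virtual TLB anyway.  The
 * helper name and parameter names are illustrative.
 */
static void ptce_loop_sketch(VCPU * vcpu, u64 base, u64 count0, u64 count1,
                             u64 stride0, u64 stride1)
{
	u64 i, j, addr = base;

	for (i = 0; i < count0; i++) {
		for (j = 0; j < count1; j++) {
			vcpu_ptc_e(vcpu, addr);
			addr += stride1;
		}
		addr += stride0;
	}
}
#endif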
2465 IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
2466 {
2467 printk("vcpu_ptc_g: called, not implemented yet\n");
2468 return IA64_ILLOP_FAULT;
2469 }
2471 IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
2472 {
2473 // FIXME: validate not flushing Xen addresses
2474 // if (Xen address) return(IA64_ILLOP_FAULT);
2475 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
2476 //printk("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
2478 check_xen_space_overlap("ptc_ga", vadr, addr_range);
2480 domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);
2482 return IA64_NO_FAULT;
2483 }
2485 IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
2486 {
2487 unsigned long region = vadr >> 61;
2488 u64 addr_range = 1UL << log_range;
2489 unsigned long rid, rr;
2490 int i;
2491 TR_ENTRY *trp;
2493 BUG_ON(vcpu != current);
2494 check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);
2496 rr = PSCB(vcpu, rrs)[region];
2497 rid = rr & RR_RID_MASK;
2499 /* Purge TC */
2500 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2502 /* Purge tr and recompute dtr_regions. */
2503 vcpu->arch.dtr_regions = 0;
2504 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
2505 if (vcpu_match_tr_entry_range
2506 (trp, rid, vadr, vadr + addr_range))
2507 vcpu_purge_tr_entry(trp);
2508 else if (trp->pte.p)
2509 vcpu_quick_region_set(vcpu->arch.dtr_regions,
2510 trp->vadr);
2512 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2514 return IA64_NO_FAULT;
2515 }
2517 IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
2518 {
2519 unsigned long region = vadr >> 61;
2520 u64 addr_range = 1UL << log_range;
2521 unsigned long rid, rr;
2522 int i;
2523 TR_ENTRY *trp;
2525 BUG_ON(vcpu != current);
2526 check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);
2528 rr = PSCB(vcpu, rrs)[region];
2529 rid = rr & RR_RID_MASK;
2531 /* Purge TC */
2532 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2534 /* Purge tr and recompute itr_regions. */
2535 vcpu->arch.itr_regions = 0;
2536 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
2537 if (vcpu_match_tr_entry_range
2538 (trp, rid, vadr, vadr + addr_range))
2539 vcpu_purge_tr_entry(trp);
2540 else if (trp->pte.p)
2541 vcpu_quick_region_set(vcpu->arch.itr_regions,
2542 trp->vadr);
2544 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2546 return IA64_NO_FAULT;
2547 }