ia64/xen-unstable

xen/arch/ia64/xen/vcpu.c @ 16338:303ac8ec2359

[IA64] Clean up of vcpu_init_regs().

Use a symbolic macro value instead of a magic number.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Wed Nov 07 10:45:35 2007 -0700 (2007-11-07)
parents 166bf3b04495
children 213a7029fdbc
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/xen.h>
11 #include <xen/mm.h>
12 #include <asm/ia64_int.h>
13 #include <asm/vcpu.h>
14 #include <asm/regionreg.h>
15 #include <asm/tlb.h>
16 #include <asm/processor.h>
17 #include <asm/delay.h>
18 #include <asm/vmx_vcpu.h>
19 #include <asm/vhpt.h>
20 #include <asm/tlbflush.h>
21 #include <asm/privop.h>
22 #include <xen/event.h>
23 #include <asm/vmx_phy_mode.h>
24 #include <asm/bundle.h>
25 #include <asm/privop_stat.h>
26 #include <asm/uaccess.h>
27 #include <asm/p2m_entry.h>
28 #include <asm/tlb_track.h>
30 /* FIXME: where should these declarations live? */
31 extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
32 struct pt_regs *regs);
33 extern void setreg(unsigned long regnum, unsigned long val, int nat,
34 struct pt_regs *regs);
35 extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
36 struct pt_regs *regs);
38 extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
39 struct pt_regs *regs);
41 typedef union {
42 struct ia64_psr ia64_psr;
43 unsigned long i64;
44 } PSR;
46 // this def for vcpu_regs won't work if kernel stack is present
47 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
49 #define IA64_PTA_SZ_BIT 2
50 #define IA64_PTA_VF_BIT 8
51 #define IA64_PTA_BASE_BIT 15
52 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
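/* For illustration: IA64_PTA_SZ(15) expands to 15 << 2 == 0x3c, i.e. a
 * pta.size field of 15, the minimum legal VHPT size of 2^15 = 32KB (see the
 * pta comment in vcpu_init_regs() below). vcpu_thash() extracts the same
 * field with (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT. */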
54 #define IA64_PSR_NON_VIRT_BITS \
55 (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | \
56 IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK | \
57 IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB | \
58 IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID | \
59 IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS | \
60 IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
62 unsigned long vcpu_verbose = 0;
64 /**************************************************************************
65 VCPU general register access routines
66 **************************************************************************/
67 #ifdef XEN
68 u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
69 {
70 REGS *regs = vcpu_regs(vcpu);
71 u64 val;
73 if (!reg)
74 return 0;
75 getreg(reg, &val, 0, regs); // FIXME: handle NATs later
76 return val;
77 }
79 IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
80 {
81 REGS *regs = vcpu_regs(vcpu);
82 int nat;
84 getreg(reg, val, &nat, regs); // FIXME: handle NATs later
85 if (nat)
86 return IA64_NAT_CONSUMPTION_VECTOR;
87 return 0;
88 }
90 // returns:
91 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
92 // IA64_NO_FAULT otherwise
93 IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
94 {
95 REGS *regs = vcpu_regs(vcpu);
96 long sof = (regs->cr_ifs) & 0x7f;
98 if (!reg)
99 return IA64_ILLOP_FAULT;
100 if (reg >= sof + 32)
101 return IA64_ILLOP_FAULT;
102 setreg(reg, value, nat, regs); // FIXME: handle NATs later
103 return IA64_NO_FAULT;
104 }
106 IA64FAULT
107 vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
108 {
109 REGS *regs = vcpu_regs(vcpu);
110 getfpreg(reg, val, regs); // FIXME: handle NATs later
111 return IA64_NO_FAULT;
112 }
114 IA64FAULT
115 vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
116 {
117 REGS *regs = vcpu_regs(vcpu);
118 if (reg > 1)
119 setfpreg(reg, val, regs); // FIXME: handle NATs later
120 return IA64_NO_FAULT;
121 }
123 #else
124 // returns:
125 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
126 // IA64_NO_FAULT otherwise
127 IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
128 {
129 REGS *regs = vcpu_regs(vcpu);
130 long sof = (regs->cr_ifs) & 0x7f;
132 if (!reg)
133 return IA64_ILLOP_FAULT;
134 if (reg >= sof + 32)
135 return IA64_ILLOP_FAULT;
136 setreg(reg, value, 0, regs); // FIXME: handle NATs later
137 return IA64_NO_FAULT;
138 }
140 #endif
142 void vcpu_init_regs(struct vcpu *v)
143 {
144 struct pt_regs *regs;
146 regs = vcpu_regs(v);
147 if (VMX_DOMAIN(v)) {
148 /* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
149 regs->cr_ipsr = IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT |
150 IA64_PSR_I | IA64_PSR_IC | IA64_PSR_SI |
151 IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_VM;
152 /* lazy fp */
153 FP_PSR(v) = IA64_PSR_DFH;
154 regs->cr_ipsr |= IA64_PSR_DFH;
155 } else {
156 regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
157 | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
158 regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
159 | IA64_PSR_RI | IA64_PSR_IS);
160 // domain runs at PL2
161 regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,IA64_PSR_CPL0_BIT);
162 // lazy fp
163 PSCB(v, hpsr_dfh) = 1;
164 PSCB(v, hpsr_mfh) = 0;
165 regs->cr_ipsr |= IA64_PSR_DFH;
166 }
167 regs->cr_ifs = 1UL << 63; /* or clear? */
168 regs->ar_fpsr = FPSR_DEFAULT;
170 if (VMX_DOMAIN(v)) {
171 vmx_init_all_rr(v);
172 /* Virtual processor context setup */
173 VCPU(v, vpsr) = IA64_PSR_BN;
174 VCPU(v, dcr) = 0;
175 } else {
176 init_all_rr(v);
177 regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
178 VCPU(v, banknum) = 1;
179 VCPU(v, metaphysical_mode) = 1;
180 VCPU(v, interrupt_mask_addr) =
181 (unsigned char *)v->domain->arch.shared_info_va +
182 INT_ENABLE_OFFSET(v);
183 VCPU(v, itv) = (1 << 16); /* timer vector masked */
185 v->vcpu_info->evtchn_upcall_pending = 0;
186 v->vcpu_info->evtchn_upcall_mask = -1;
187 }
189 /* pta.size must not be 0. The minimum is 15 (32k) */
190 VCPU(v, pta) = 15 << 2;
192 v->arch.domain_itm_last = -1L;
193 }
195 /**************************************************************************
196 VCPU privileged application register access routines
197 **************************************************************************/
199 void vcpu_load_kernel_regs(VCPU * vcpu)
200 {
201 ia64_set_kr(0, VCPU(vcpu, krs[0]));
202 ia64_set_kr(1, VCPU(vcpu, krs[1]));
203 ia64_set_kr(2, VCPU(vcpu, krs[2]));
204 ia64_set_kr(3, VCPU(vcpu, krs[3]));
205 ia64_set_kr(4, VCPU(vcpu, krs[4]));
206 ia64_set_kr(5, VCPU(vcpu, krs[5]));
207 ia64_set_kr(6, VCPU(vcpu, krs[6]));
208 ia64_set_kr(7, VCPU(vcpu, krs[7]));
209 }
211 /* GCC 4.0.2 seems not to be able to suppress this call. */
212 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
214 IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
215 {
216 if (reg == 44)
217 return vcpu_set_itc(vcpu, val);
218 else if (reg == 27)
219 return IA64_ILLOP_FAULT;
220 else if (reg == 24)
221 printk("warning: setting ar.eflg is a no-op; no IA-32 "
222 "support\n");
223 else if (reg > 7)
224 return IA64_ILLOP_FAULT;
225 else {
226 PSCB(vcpu, krs[reg]) = val;
227 ia64_set_kr(reg, val);
228 }
229 return IA64_NO_FAULT;
230 }
232 IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
233 {
234 if (reg == 24)
235 printk("warning: getting ar.eflg is a no-op; no IA-32 "
236 "support\n");
237 else if (reg > 7)
238 return IA64_ILLOP_FAULT;
239 else
240 *val = PSCB(vcpu, krs[reg]);
241 return IA64_NO_FAULT;
242 }
244 /**************************************************************************
245 VCPU protection key emulation for PV
246 This first implementation reserves one pkr for the hypervisor key.
247 When the guest sets psr.pk, the hypervisor key is loaded into pkr[15], so
248 the hypervisor itself may run with psr.pk==1. The key for the hypervisor is 0.
249 Furthermore the VCPU is flagged as using protection keys.
250 Currently the domU has to manage the keys it uses itself, because when a
251 pkr is set there is no check against the other pkrs to see whether that
252 key is already in use.
253 **************************************************************************/
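/* For illustration, a rough sketch of the intended flow (derived from the
 * routines below, not a normative description):
 *   guest executes "ssm psr.pk"
 *     -> vcpu_set_psr_sm() sees imm.pk
 *       -> vcpu_pkr_set_psr_handling() marks the VCPU as using keys
 *          (XEN_IA64_PKR_IN_USE) and loads the hypervisor key
 *          XEN_IA64_PKR_VAL into pkr[XEN_IA64_NPKRS]
 *   context_switch() -> vcpu_pkr_load_regs() reloads pkr[0..XEN_IA64_NPKRS]
 *   guest "mov pkr[]=..." -> vcpu_set_pkr() validates reserved fields and
 *   writes the physical pkr directly. */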
255 /* This function loads the protection key registers from struct arch_vcpu
256 * into the processor pkrs. Called from context_switch().
257 * TODO: take care of the order in which the pkrs are written!
258 */
259 void vcpu_pkr_load_regs(VCPU * vcpu)
260 {
261 int i;
263 for (i = 0; i <= XEN_IA64_NPKRS; i++)
264 ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
265 }
267 /* The function activates the pkr handling. */
268 static void vcpu_pkr_set_psr_handling(VCPU * vcpu)
269 {
270 if (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE)
271 return;
273 vcpu_pkr_use_set(vcpu);
274 PSCBX(vcpu, pkrs[XEN_IA64_NPKRS]) = XEN_IA64_PKR_VAL;
276 /* Write the special key for the hypervisor into pkr[15]. */
277 ia64_set_pkr(XEN_IA64_NPKRS, XEN_IA64_PKR_VAL);
278 }
280 /**************************************************************************
281 VCPU processor status register access routines
282 **************************************************************************/
284 static void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
285 {
286 /* only do something if mode changes */
287 if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
288 PSCB(vcpu, metaphysical_mode) = newmode;
289 if (newmode)
290 set_metaphysical_rr0();
291 else
292 set_virtual_rr0();
293 }
294 }
296 IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
297 {
298 vcpu_set_metaphysical_mode(vcpu, TRUE);
299 return IA64_NO_FAULT;
300 }
302 IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
303 {
304 struct ia64_psr psr, imm, *ipsr;
305 REGS *regs = vcpu_regs(vcpu);
307 //PRIVOP_COUNT_ADDR(regs,_RSM);
308 // TODO: All of these bits need to be virtualized
309 // TODO: Only allowed for current vcpu
310 __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
311 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
312 imm = *(struct ia64_psr *)&imm24;
313 // interrupt flag
314 if (imm.i)
315 vcpu->vcpu_info->evtchn_upcall_mask = 1;
316 if (imm.ic)
317 PSCB(vcpu, interrupt_collection_enabled) = 0;
318 // interrupt collection flag
319 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
320 // just handle psr.up and psr.pp for now
321 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
322 IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
323 IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
324 return IA64_ILLOP_FAULT;
325 if (imm.dfh) {
326 ipsr->dfh = PSCB(vcpu, hpsr_dfh);
327 PSCB(vcpu, vpsr_dfh) = 0;
328 }
329 if (imm.dfl)
330 ipsr->dfl = 0;
331 if (imm.pp) {
332 ipsr->pp = 1;
333 psr.pp = 1; // priv perf ctrs always enabled
334 PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
335 }
336 if (imm.up) {
337 ipsr->up = 0;
338 psr.up = 0;
339 }
340 if (imm.sp) {
341 ipsr->sp = 0;
342 psr.sp = 0;
343 }
344 if (imm.be)
345 ipsr->be = 0;
346 if (imm.dt)
347 vcpu_set_metaphysical_mode(vcpu, TRUE);
348 if (imm.pk) {
349 ipsr->pk = 0;
350 vcpu_pkr_use_unset(vcpu);
351 }
352 __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
353 return IA64_NO_FAULT;
354 }
356 IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
357 {
358 vcpu_set_metaphysical_mode(vcpu, FALSE);
359 return IA64_NO_FAULT;
360 }
362 IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
363 {
364 vcpu->vcpu_info->evtchn_upcall_mask = 0;
365 PSCB(vcpu, interrupt_collection_enabled) = 1;
366 return IA64_NO_FAULT;
367 }
369 IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
370 {
371 struct ia64_psr psr, imm, *ipsr;
372 REGS *regs = vcpu_regs(vcpu);
373 u64 mask, enabling_interrupts = 0;
375 //PRIVOP_COUNT_ADDR(regs,_SSM);
376 // TODO: All of these bits need to be virtualized
377 __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
378 imm = *(struct ia64_psr *)&imm24;
379 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
380 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
381 mask =
382 IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
383 IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
384 IA64_PSR_PK;
385 if (imm24 & ~mask)
386 return IA64_ILLOP_FAULT;
387 if (imm.dfh) {
388 PSCB(vcpu, vpsr_dfh) = 1;
389 ipsr->dfh = 1;
390 }
391 if (imm.dfl)
392 ipsr->dfl = 1;
393 if (imm.pp) {
394 ipsr->pp = 1;
395 psr.pp = 1;
396 PSCB(vcpu, vpsr_pp) = 1;
397 }
398 if (imm.sp) {
399 ipsr->sp = 1;
400 psr.sp = 1;
401 }
402 if (imm.i) {
403 if (vcpu->vcpu_info->evtchn_upcall_mask) {
404 //printk("vcpu_set_psr_sm: psr.ic 0->1\n");
405 enabling_interrupts = 1;
406 }
407 vcpu->vcpu_info->evtchn_upcall_mask = 0;
408 }
409 if (imm.ic)
410 PSCB(vcpu, interrupt_collection_enabled) = 1;
411 // TODO: do this faster
412 if (imm.mfl) {
413 ipsr->mfl = 1;
414 psr.mfl = 1;
415 }
416 if (imm.mfh) {
417 ipsr->mfh = 1;
418 psr.mfh = 1;
419 }
420 if (imm.ac) {
421 ipsr->ac = 1;
422 psr.ac = 1;
423 }
424 if (imm.up) {
425 ipsr->up = 1;
426 psr.up = 1;
427 }
428 if (imm.be)
429 ipsr->be = 1;
430 if (imm.dt)
431 vcpu_set_metaphysical_mode(vcpu, FALSE);
432 if (imm.pk) {
433 vcpu_pkr_set_psr_handling(vcpu);
434 ipsr->pk = 1;
435 }
436 __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
437 if (enabling_interrupts &&
438 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
439 PSCB(vcpu, pending_interruption) = 1;
440 return IA64_NO_FAULT;
441 }
443 IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
444 {
445 struct ia64_psr newpsr, *ipsr;
446 REGS *regs = vcpu_regs(vcpu);
447 u64 enabling_interrupts = 0;
449 newpsr = *(struct ia64_psr *)&val;
450 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
451 // just handle psr.up and psr.pp for now
452 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
453 // return IA64_ILLOP_FAULT;
454 // however, unlike ssm, trying to set other bits here must not be an error
455 if (newpsr.dfh) {
456 ipsr->dfh = 1;
457 PSCB(vcpu, vpsr_dfh) = 1;
458 } else {
459 ipsr->dfh = PSCB(vcpu, hpsr_dfh);
460 PSCB(vcpu, vpsr_dfh) = 0;
461 }
462 if (newpsr.dfl)
463 ipsr->dfl = 1;
464 if (newpsr.pp) {
465 ipsr->pp = 1;
466 PSCB(vcpu, vpsr_pp) = 1;
467 } else {
468 ipsr->pp = 1;
469 PSCB(vcpu, vpsr_pp) = 0;
470 }
471 if (newpsr.up)
472 ipsr->up = 1;
473 if (newpsr.sp)
474 ipsr->sp = 1;
475 if (newpsr.i) {
476 if (vcpu->vcpu_info->evtchn_upcall_mask)
477 enabling_interrupts = 1;
478 vcpu->vcpu_info->evtchn_upcall_mask = 0;
479 }
480 if (newpsr.ic)
481 PSCB(vcpu, interrupt_collection_enabled) = 1;
482 if (newpsr.mfl)
483 ipsr->mfl = 1;
484 if (newpsr.mfh)
485 ipsr->mfh = 1;
486 if (newpsr.ac)
487 ipsr->ac = 1;
488 if (newpsr.up)
489 ipsr->up = 1;
490 if (newpsr.dt && newpsr.rt)
491 vcpu_set_metaphysical_mode(vcpu, FALSE);
492 else
493 vcpu_set_metaphysical_mode(vcpu, TRUE);
494 if (newpsr.be)
495 ipsr->be = 1;
496 if (newpsr.pk) {
497 vcpu_pkr_set_psr_handling(vcpu);
498 ipsr->pk = 1;
499 } else
500 vcpu_pkr_use_unset(vcpu);
501 if (enabling_interrupts &&
502 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
503 PSCB(vcpu, pending_interruption) = 1;
504 return IA64_NO_FAULT;
505 }
507 IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val)
508 {
509 IA64_PSR newpsr, vpsr;
510 REGS *regs = vcpu_regs(vcpu);
511 u64 enabling_interrupts = 0;
513 /* Copy non-virtualized bits. */
514 newpsr.val = val & IA64_PSR_NON_VIRT_BITS;
516 /* Bits forced to 1 (psr.si, psr.is and psr.mc are forced to 0) */
517 newpsr.val |= IA64_PSR_DI;
519 newpsr.val |= IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
520 IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_DI | IA64_PSR_PP;
522 vpsr.val = val;
524 if (val & IA64_PSR_DFH) {
525 newpsr.dfh = 1;
526 PSCB(vcpu, vpsr_dfh) = 1;
527 } else {
528 newpsr.dfh = PSCB(vcpu, hpsr_dfh);
529 PSCB(vcpu, vpsr_dfh) = 0;
530 }
532 PSCB(vcpu, vpsr_pp) = vpsr.pp;
534 if (vpsr.i) {
535 if (vcpu->vcpu_info->evtchn_upcall_mask)
536 enabling_interrupts = 1;
538 vcpu->vcpu_info->evtchn_upcall_mask = 0;
540 if (enabling_interrupts &&
541 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
542 PSCB(vcpu, pending_interruption) = 1;
543 } else
544 vcpu->vcpu_info->evtchn_upcall_mask = 1;
546 PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
547 vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));
549 newpsr.cpl |= max_t(u64, vpsr.cpl, CONFIG_CPL0_EMUL);
551 if (PSCB(vcpu, banknum) != vpsr.bn) {
552 if (vpsr.bn)
553 vcpu_bsw1(vcpu);
554 else
555 vcpu_bsw0(vcpu);
556 }
557 if (vpsr.pk) {
558 vcpu_pkr_set_psr_handling(vcpu);
559 newpsr.pk = 1;
560 } else
561 vcpu_pkr_use_unset(vcpu);
563 regs->cr_ipsr = newpsr.val;
565 return IA64_NO_FAULT;
566 }
568 u64 vcpu_get_psr(VCPU * vcpu)
569 {
570 REGS *regs = vcpu_regs(vcpu);
571 PSR newpsr;
572 PSR ipsr;
574 ipsr.i64 = regs->cr_ipsr;
576 /* Copy non-virtualized bits. */
577 newpsr.i64 = ipsr.i64 & IA64_PSR_NON_VIRT_BITS;
579 /* Bits forced to 1 (psr.si and psr.is are forced to 0) */
580 newpsr.i64 |= IA64_PSR_DI;
582 /* System mask. */
583 newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
584 newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
586 if (!PSCB(vcpu, metaphysical_mode))
587 newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
589 newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
590 newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
592 /* Fool cpl. */
593 if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
594 newpsr.ia64_psr.cpl = 0;
595 else
596 newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;
598 newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
600 return newpsr.i64;
601 }
603 IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval)
604 {
605 u64 psr = vcpu_get_psr(vcpu);
606 *pval = psr & (MASK(0, 32) | MASK(35, 2));
607 return IA64_NO_FAULT;
608 }
610 BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
611 {
612 return !!PSCB(vcpu, interrupt_collection_enabled);
613 }
615 BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
616 {
617 return !vcpu->vcpu_info->evtchn_upcall_mask;
618 }
620 /**************************************************************************
621 VCPU control register access routines
622 **************************************************************************/
624 IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
625 {
626 *pval = PSCB(vcpu, dcr);
627 return IA64_NO_FAULT;
628 }
630 IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
631 {
632 if (VMX_DOMAIN(vcpu))
633 *pval = PSCB(vcpu, iva) & ~0x7fffL;
634 else
635 *pval = PSCBX(vcpu, iva) & ~0x7fffL;
637 return IA64_NO_FAULT;
638 }
640 IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
641 {
642 *pval = PSCB(vcpu, pta);
643 return IA64_NO_FAULT;
644 }
646 IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
647 {
648 //REGS *regs = vcpu_regs(vcpu);
649 //*pval = regs->cr_ipsr;
650 *pval = PSCB(vcpu, ipsr);
651 return IA64_NO_FAULT;
652 }
654 IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
655 {
656 *pval = PSCB(vcpu, isr);
657 return IA64_NO_FAULT;
658 }
660 IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
661 {
662 //REGS *regs = vcpu_regs(vcpu);
663 //*pval = regs->cr_iip;
664 *pval = PSCB(vcpu, iip);
665 return IA64_NO_FAULT;
666 }
668 IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
669 {
670 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
671 *pval = PSCB(vcpu, ifa);
672 return IA64_NO_FAULT;
673 }
675 unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)
676 {
677 ia64_rr rr;
679 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
680 return rr.ps;
681 }
683 unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr)
684 {
685 ia64_rr rr;
687 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
688 return rr.rid;
689 }
691 unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa)
692 {
693 ia64_rr rr;
695 rr.rrval = 0;
696 rr.ps = vcpu_get_rr_ps(vcpu, ifa);
697 rr.rid = vcpu_get_rr_rid(vcpu, ifa);
698 return rr.rrval;
699 }
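/* For illustration, assuming the usual ia64_rr layout (ve in bit 0, ps in
 * bits 7:2, rid in bits 31:8): a region with 16KB pages (ps=14) and
 * rid 0x1234 yields an on-fault itir of (0x1234 << 8) | (14 << 2) = 0x123438. */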
701 IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval)
702 {
703 u64 val = PSCB(vcpu, itir);
704 *pval = val;
705 return IA64_NO_FAULT;
706 }
708 IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval)
709 {
710 u64 val = PSCB(vcpu, iipa);
711 // SP entry code does not save iipa yet nor does it get
712 // properly delivered in the pscb
713 // printk("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
714 *pval = val;
715 return IA64_NO_FAULT;
716 }
718 IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
719 {
720 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
721 //*pval = PSCB(vcpu,regs).cr_ifs;
722 *pval = PSCB(vcpu, ifs);
723 return IA64_NO_FAULT;
724 }
726 IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval)
727 {
728 u64 val = PSCB(vcpu, iim);
729 *pval = val;
730 return IA64_NO_FAULT;
731 }
733 IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval)
734 {
735 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
736 *pval = PSCB(vcpu, iha);
737 return IA64_NO_FAULT;
738 }
740 IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
741 {
742 PSCB(vcpu, dcr) = val;
743 return IA64_NO_FAULT;
744 }
746 IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val)
747 {
748 if (VMX_DOMAIN(vcpu))
749 PSCB(vcpu, iva) = val & ~0x7fffL;
750 else
751 PSCBX(vcpu, iva) = val & ~0x7fffL;
753 return IA64_NO_FAULT;
754 }
756 IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
757 {
758 if (val & (0x3f << 9)) /* reserved fields */
759 return IA64_RSVDREG_FAULT;
760 if (val & 2) /* reserved fields */
761 return IA64_RSVDREG_FAULT;
762 PSCB(vcpu, pta) = val;
763 return IA64_NO_FAULT;
764 }
766 IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val)
767 {
768 PSCB(vcpu, ipsr) = val;
769 return IA64_NO_FAULT;
770 }
772 IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val)
773 {
774 PSCB(vcpu, isr) = val;
775 return IA64_NO_FAULT;
776 }
778 IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val)
779 {
780 PSCB(vcpu, iip) = val;
781 return IA64_NO_FAULT;
782 }
784 IA64FAULT vcpu_increment_iip(VCPU * vcpu)
785 {
786 REGS *regs = vcpu_regs(vcpu);
787 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
788 if (ipsr->ri == 2) {
789 ipsr->ri = 0;
790 regs->cr_iip += 16;
791 } else
792 ipsr->ri++;
793 return IA64_NO_FAULT;
794 }
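/* Note: an IA-64 bundle is 16 bytes and holds three instruction slots;
 * psr.ri (0..2) selects the slot within the bundle at cr.iip, so stepping
 * past slot 2 advances cr_iip by 16 and wraps ri back to 0, as above. */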
796 IA64FAULT vcpu_decrement_iip(VCPU * vcpu)
797 {
798 REGS *regs = vcpu_regs(vcpu);
799 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
801 if (ipsr->ri == 0) {
802 ipsr->ri = 2;
803 regs->cr_iip -= 16;
804 } else
805 ipsr->ri--;
807 return IA64_NO_FAULT;
808 }
810 IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
811 {
812 PSCB(vcpu, ifa) = val;
813 return IA64_NO_FAULT;
814 }
816 IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val)
817 {
818 PSCB(vcpu, itir) = val;
819 return IA64_NO_FAULT;
820 }
822 IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val)
823 {
824 // SP entry code does not save iipa yet nor does it get
825 // properly delivered in the pscb
826 // printk("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
827 PSCB(vcpu, iipa) = val;
828 return IA64_NO_FAULT;
829 }
831 IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val)
832 {
833 //REGS *regs = vcpu_regs(vcpu);
834 PSCB(vcpu, ifs) = val;
835 return IA64_NO_FAULT;
836 }
838 IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val)
839 {
840 PSCB(vcpu, iim) = val;
841 return IA64_NO_FAULT;
842 }
844 IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val)
845 {
846 PSCB(vcpu, iha) = val;
847 return IA64_NO_FAULT;
848 }
850 /**************************************************************************
851 VCPU interrupt control register access routines
852 **************************************************************************/
854 void vcpu_pend_unspecified_interrupt(VCPU * vcpu)
855 {
856 PSCB(vcpu, pending_interruption) = 1;
857 }
859 void vcpu_pend_interrupt(VCPU * vcpu, u64 vector)
860 {
861 if (vector & ~0xff) {
862 printk("vcpu_pend_interrupt: bad vector\n");
863 return;
864 }
866 if (vcpu->arch.event_callback_ip) {
867 printk("Deprecated interface. Move to new event based "
868 "solution\n");
869 return;
870 }
872 if (VMX_DOMAIN(vcpu)) {
873 set_bit(vector, VCPU(vcpu, irr));
874 } else {
875 set_bit(vector, PSCBX(vcpu, irr));
876 PSCB(vcpu, pending_interruption) = 1;
877 }
878 }
880 #define IA64_TPR_MMI 0x10000
881 #define IA64_TPR_MIC 0x000f0
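/* For illustration: IA64_TPR_MIC selects the tpr.mic field (bits 7:4).
 * With tpr.mic == 2 the class check below computes 0x20 + 15 = 47, so any
 * pending vector <= 47 is treated as masked and only vectors 48..255 can be
 * delivered; if tpr.mmi (IA64_TPR_MMI) is set, every vector is masked. */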
883 /* checks to see if a VCPU has any unmasked pending interrupts
884 * if so, returns the highest, else returns SPURIOUS_VECTOR */
885 /* NOTE: Since this gets called from vcpu_get_ivr() and the
886 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
887 * this routine also ignores pscb.interrupt_delivery_enabled
888 * and this must be checked independently; see vcpu_deliverable_interrupts() */
889 u64 vcpu_check_pending_interrupts(VCPU * vcpu)
890 {
891 u64 *p, *r, bits, bitnum, mask, i, vector;
893 if (vcpu->arch.event_callback_ip)
894 return SPURIOUS_VECTOR;
896 /* Always check for a pending event, since the guest may just ack the
897 * event injection without handling it. Later the guest may throw out
898 * the event itself.
899 */
900 check_start:
901 if (event_pending(vcpu) &&
902 !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
903 &PSCBX(vcpu, insvc[0])))
904 vcpu_pend_interrupt(vcpu,
905 vcpu->domain->shared_info->arch.
906 evtchn_vector);
908 p = &PSCBX(vcpu, irr[3]);
909 r = &PSCBX(vcpu, insvc[3]);
910 for (i = 3 ;; p--, r--, i--) {
911 bits = *p;
912 if (bits)
913 break; // got a potential interrupt
914 if (*r) {
915 // nothing in this word which is pending+inservice
916 // but there is one inservice which masks lower
917 return SPURIOUS_VECTOR;
918 }
919 if (i == 0) {
920 // checked all bits... nothing pending+inservice
921 return SPURIOUS_VECTOR;
922 }
923 }
924 // have a pending,deliverable interrupt... see if it is masked
925 bitnum = ia64_fls(bits);
926 //printk("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
927 vector = bitnum + (i * 64);
928 mask = 1L << bitnum;
929 /* sanity check for guest timer interrupt */
930 if (vector == (PSCB(vcpu, itv) & 0xff)) {
931 uint64_t now = ia64_get_itc();
932 if (now < PSCBX(vcpu, domain_itm)) {
933 // printk("Ooops, pending guest timer before its due\n");
934 PSCBX(vcpu, irr[i]) &= ~mask;
935 goto check_start;
936 }
937 }
938 //printk("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...\n",vector);
939 if (*r >= mask) {
940 // masked by equal inservice
941 //printk("but masked by equal inservice\n");
942 return SPURIOUS_VECTOR;
943 }
944 if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
945 // tpr.mmi is set
946 //printk("but masked by tpr.mmi\n");
947 return SPURIOUS_VECTOR;
948 }
949 if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
950 //tpr.mic masks class
951 //printk("but masked by tpr.mic\n");
952 return SPURIOUS_VECTOR;
953 }
954 //printk("returned to caller\n");
955 return vector;
956 }
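/* For illustration: irr[] and insvc[] are 4 x 64-bit bitmaps covering
 * vectors 0..255, scanned from word 3 (vectors 192..255) down to word 0.
 * A pending vector 0xd1 (209) sits in irr[3] bit 17 (209 - 3*64), so the
 * loop above finds bitnum 17 in word i == 3 and reconstructs
 * vector = 17 + 3*64 = 209; vcpu_get_ivr() later uses i = vector >> 6 and
 * mask = 1L << (vector & 0x3f) to move the same bit from irr[] to insvc[]. */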
958 u64 vcpu_deliverable_interrupts(VCPU * vcpu)
959 {
960 return (vcpu_get_psr_i(vcpu) &&
961 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
962 }
964 u64 vcpu_deliverable_timer(VCPU * vcpu)
965 {
966 return (vcpu_get_psr_i(vcpu) &&
967 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
968 }
970 IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval)
971 {
972 /* Use EID=0, ID=vcpu_id. */
973 *pval = vcpu->vcpu_id << 24;
974 return IA64_NO_FAULT;
975 }
977 IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval)
978 {
979 int i;
980 u64 vector, mask;
982 #define HEARTBEAT_FREQ 16 // period in seconds
983 #ifdef HEARTBEAT_FREQ
984 #define N_DOMS 16 // max number of domains tracked for heartbeat stats
985 #if 0
986 static long count[N_DOMS] = { 0 };
987 #endif
988 static long nonclockcount[N_DOMS] = { 0 };
989 unsigned domid = vcpu->domain->domain_id;
990 #endif
991 #ifdef IRQ_DEBUG
992 static char firstivr = 1;
993 static char firsttime[256];
994 if (firstivr) {
995 int i;
996 for (i = 0; i < 256; i++)
997 firsttime[i] = 1;
998 firstivr = 0;
999 }
1000 #endif
1002 vector = vcpu_check_pending_interrupts(vcpu);
1003 if (vector == SPURIOUS_VECTOR) {
1004 PSCB(vcpu, pending_interruption) = 0;
1005 *pval = vector;
1006 return IA64_NO_FAULT;
1007 }
1008 #ifdef HEARTBEAT_FREQ
1009 if (domid >= N_DOMS)
1010 domid = N_DOMS - 1;
1011 #if 0
1012 if (vector == (PSCB(vcpu, itv) & 0xff)) {
1013 if (!(++count[domid] & ((HEARTBEAT_FREQ * 1024) - 1))) {
1014 printk("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
1015 domid, count[domid], nonclockcount[domid]);
1016 //count[domid] = 0;
1017 //dump_runq();
1018 }
1019 }
1020 #endif
1021 else
1022 nonclockcount[domid]++;
1023 #endif
1024 // now have an unmasked, pending, deliverable vector!
1025 // getting ivr has "side effects"
1026 #ifdef IRQ_DEBUG
1027 if (firsttime[vector]) {
1028 printk("*** First get_ivr on vector=%lu,itc=%lx\n",
1029 vector, ia64_get_itc());
1030 firsttime[vector] = 0;
1031 }
1032 #endif
1033 /* if delivering a timer interrupt, remember domain_itm, which
1034 * needs to be done before clearing irr
1035 */
1036 if (vector == (PSCB(vcpu, itv) & 0xff)) {
1037 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
1038 }
1040 i = vector >> 6;
1041 mask = 1L << (vector & 0x3f);
1042 //printk("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
1043 PSCBX(vcpu, insvc[i]) |= mask;
1044 PSCBX(vcpu, irr[i]) &= ~mask;
1045 //PSCB(vcpu,pending_interruption)--;
1046 *pval = vector;
1047 return IA64_NO_FAULT;
1048 }
1050 IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval)
1051 {
1052 *pval = PSCB(vcpu, tpr);
1053 return IA64_NO_FAULT;
1054 }
1056 IA64FAULT vcpu_get_eoi(VCPU * vcpu, u64 * pval)
1057 {
1058 *pval = 0L; // reads of eoi always return 0
1059 return IA64_NO_FAULT;
1060 }
1062 IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval)
1063 {
1064 *pval = PSCBX(vcpu, irr[0]);
1065 return IA64_NO_FAULT;
1066 }
1068 IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval)
1069 {
1070 *pval = PSCBX(vcpu, irr[1]);
1071 return IA64_NO_FAULT;
1072 }
1074 IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval)
1075 {
1076 *pval = PSCBX(vcpu, irr[2]);
1077 return IA64_NO_FAULT;
1078 }
1080 IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval)
1081 {
1082 *pval = PSCBX(vcpu, irr[3]);
1083 return IA64_NO_FAULT;
1084 }
1086 IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval)
1087 {
1088 *pval = PSCB(vcpu, itv);
1089 return IA64_NO_FAULT;
1090 }
1092 IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval)
1093 {
1094 *pval = PSCB(vcpu, pmv);
1095 return IA64_NO_FAULT;
1096 }
1098 IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
1099 {
1100 *pval = PSCB(vcpu, cmcv);
1101 return IA64_NO_FAULT;
1102 }
1104 IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
1105 {
1106 // fix this when setting values other than m-bit is supported
1107 gdprintk(XENLOG_DEBUG,
1108 "vcpu_get_lrr0: Unmasked interrupts unsupported\n");
1109 *pval = (1L << 16);
1110 return IA64_NO_FAULT;
1111 }
1113 IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
1114 {
1115 // fix this when setting values other than m-bit is supported
1116 gdprintk(XENLOG_DEBUG,
1117 "vcpu_get_lrr1: Unmasked interrupts unsupported\n");
1118 *pval = (1L << 16);
1119 return IA64_NO_FAULT;
1120 }
1122 IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val)
1124 printk("vcpu_set_lid: Setting cr.lid is unsupported\n");
1125 return IA64_ILLOP_FAULT;
1128 IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val)
1130 if (val & 0xff00)
1131 return IA64_RSVDREG_FAULT;
1132 PSCB(vcpu, tpr) = val;
1133 /* This can unmask interrupts. */
1134 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
1135 PSCB(vcpu, pending_interruption) = 1;
1136 return IA64_NO_FAULT;
1139 IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val)
1141 u64 *p, bits, vec, bitnum;
1142 int i;
1144 p = &PSCBX(vcpu, insvc[3]);
1145 for (i = 3; (i >= 0) && !(bits = *p); i--, p--)
1147 if (i < 0) {
1148 printk("Trying to EOI interrupt when none are in-service.\n");
1149 return IA64_NO_FAULT;
1151 bitnum = ia64_fls(bits);
1152 vec = bitnum + (i * 64);
1153 /* clear the correct bit */
1154 bits &= ~(1L << bitnum);
1155 *p = bits;
1156 /* clearing an eoi bit may unmask another pending interrupt... */
1157 if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
1158 // worry about this later... Linux only calls eoi
1159 // with interrupts disabled
1160 printk("Trying to EOI interrupt with interrupts enabled\n");
1162 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
1163 PSCB(vcpu, pending_interruption) = 1;
1164 //printk("YYYYY vcpu_set_eoi: Successful\n");
1165 return IA64_NO_FAULT;
1168 IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val)
1170 if (!(val & (1L << 16))) {
1171 printk("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
1172 return IA64_ILLOP_FAULT;
1174 // no place to save this state but nothing to do anyway
1175 return IA64_NO_FAULT;
1178 IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val)
1180 if (!(val & (1L << 16))) {
1181 printk("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
1182 return IA64_ILLOP_FAULT;
1184 // no place to save this state but nothing to do anyway
1185 return IA64_NO_FAULT;
1188 IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val)
1190 /* Check reserved fields. */
1191 if (val & 0xef00)
1192 return IA64_ILLOP_FAULT;
1193 PSCB(vcpu, itv) = val;
1194 if (val & 0x10000) {
1195 /* Disable itm. */
1196 PSCBX(vcpu, domain_itm) = 0;
1197 } else
1198 vcpu_set_next_timer(vcpu);
1199 return IA64_NO_FAULT;
1202 IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val)
1204 if (val & 0xef00) /* reserved fields */
1205 return IA64_RSVDREG_FAULT;
1206 PSCB(vcpu, pmv) = val;
1207 return IA64_NO_FAULT;
1210 IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val)
1212 if (val & 0xef00) /* reserved fields */
1213 return IA64_RSVDREG_FAULT;
1214 PSCB(vcpu, cmcv) = val;
1215 return IA64_NO_FAULT;
1218 /**************************************************************************
1219 VCPU temporary register access routines
1220 **************************************************************************/
1221 u64 vcpu_get_tmp(VCPU * vcpu, u64 index)
1223 if (index > 7)
1224 return 0;
1225 return PSCB(vcpu, tmp[index]);
1228 void vcpu_set_tmp(VCPU * vcpu, u64 index, u64 val)
1230 if (index <= 7)
1231 PSCB(vcpu, tmp[index]) = val;
1234 /**************************************************************************
1235 Interval timer routines
1236 **************************************************************************/
1238 BOOLEAN vcpu_timer_disabled(VCPU * vcpu)
1240 u64 itv = PSCB(vcpu, itv);
1241 return (!itv || !!(itv & 0x10000));
1244 BOOLEAN vcpu_timer_inservice(VCPU * vcpu)
1246 u64 itv = PSCB(vcpu, itv);
1247 return test_bit(itv, PSCBX(vcpu, insvc));
1250 BOOLEAN vcpu_timer_expired(VCPU * vcpu)
1252 unsigned long domain_itm = PSCBX(vcpu, domain_itm);
1253 unsigned long now = ia64_get_itc();
1255 if (!domain_itm)
1256 return FALSE;
1257 if (now < domain_itm)
1258 return FALSE;
1259 if (vcpu_timer_disabled(vcpu))
1260 return FALSE;
1261 return TRUE;
1264 void vcpu_safe_set_itm(unsigned long val)
1266 unsigned long epsilon = 100;
1267 unsigned long flags;
1268 u64 now = ia64_get_itc();
1270 local_irq_save(flags);
1271 while (1) {
1272 //printk("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1273 ia64_set_itm(val);
1274 if (val > (now = ia64_get_itc()))
1275 break;
1276 val = now + epsilon;
1277 epsilon <<= 1;
1279 local_irq_restore(flags);
1282 void vcpu_set_next_timer(VCPU * vcpu)
1284 u64 d = PSCBX(vcpu, domain_itm);
1285 //u64 s = PSCBX(vcpu,xen_itm);
1286 u64 s = local_cpu_data->itm_next;
1287 u64 now = ia64_get_itc();
1289 /* gloss over the wraparound problem for now... we know it exists
1290 * but it doesn't matter right now */
1292 if (is_idle_domain(vcpu->domain)) {
1293 // printk("****** vcpu_set_next_timer called during idle!!\n");
1294 vcpu_safe_set_itm(s);
1295 return;
1297 //s = PSCBX(vcpu,xen_itm);
1298 if (d && (d > now) && (d < s)) {
1299 vcpu_safe_set_itm(d);
1300 //using_domain_as_itm++;
1301 } else {
1302 vcpu_safe_set_itm(s);
1303 //using_xen_as_itm++;
1307 IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val)
1309 //UINT now = ia64_get_itc();
1311 //if (val < now) val = now + 1000;
1312 //printk("*** vcpu_set_itm: called with %lx\n",val);
1313 PSCBX(vcpu, domain_itm) = val;
1314 vcpu_set_next_timer(vcpu);
1315 return IA64_NO_FAULT;
1318 IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
1320 #define DISALLOW_SETTING_ITC_FOR_NOW
1321 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1322 static int did_print;
1323 if (!did_print) {
1324 printk("vcpu_set_itc: Setting ar.itc is currently disabled "
1325 "(this message is only displayed once)\n");
1326 did_print = 1;
1328 #else
1329 u64 oldnow = ia64_get_itc();
1330 u64 olditm = PSCBX(vcpu, domain_itm);
1331 unsigned long d = olditm - oldnow;
1332 unsigned long x = local_cpu_data->itm_next - oldnow;
1334 u64 newnow = val, min_delta;
1336 local_irq_disable();
1337 if (olditm) {
1338 printk("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val,
1339 newnow + d);
1340 PSCBX(vcpu, domain_itm) = newnow + d;
1342 local_cpu_data->itm_next = newnow + x;
1343 d = PSCBX(vcpu, domain_itm);
1344 x = local_cpu_data->itm_next;
1346 ia64_set_itc(newnow);
1347 if (d && (d > newnow) && (d < x)) {
1348 vcpu_safe_set_itm(d);
1349 //using_domain_as_itm++;
1350 } else {
1351 vcpu_safe_set_itm(x);
1352 //using_xen_as_itm++;
1354 local_irq_enable();
1355 #endif
1356 return IA64_NO_FAULT;
1359 IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval)
1361 //FIXME: Implement this
1362 printk("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1363 return IA64_NO_FAULT;
1364 //return IA64_ILLOP_FAULT;
1367 IA64FAULT vcpu_get_itc(VCPU * vcpu, u64 * pval)
1369 //TODO: Implement this
1370 printk("vcpu_get_itc: Getting ar.itc is unsupported\n");
1371 return IA64_ILLOP_FAULT;
1374 void vcpu_pend_timer(VCPU * vcpu)
1376 u64 itv = PSCB(vcpu, itv) & 0xff;
1378 if (vcpu_timer_disabled(vcpu))
1379 return;
1380 //if (vcpu_timer_inservice(vcpu)) return;
1381 if (PSCBX(vcpu, domain_itm_last) == PSCBX(vcpu, domain_itm)) {
1382 // already delivered an interrupt for this so
1383 // don't deliver another
1384 return;
1386 if (vcpu->arch.event_callback_ip) {
1387 /* A small window may occur where the vIRQ is injected before the related
1388 * handler has been registered. Don't fire in that case.
1389 */
1390 if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
1391 send_guest_vcpu_virq(vcpu, VIRQ_ITC);
1392 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
1394 } else
1395 vcpu_pend_interrupt(vcpu, itv);
1398 // returns true if ready to deliver a timer interrupt too early
1399 u64 vcpu_timer_pending_early(VCPU * vcpu)
1401 u64 now = ia64_get_itc();
1402 u64 itm = PSCBX(vcpu, domain_itm);
1404 if (vcpu_timer_disabled(vcpu))
1405 return 0;
1406 if (!itm)
1407 return 0;
1408 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1411 /**************************************************************************
1412 Privileged operation emulation routines
1413 **************************************************************************/
1415 static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
1417 PSCB(vcpu, ifa) = ifa;
1418 PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
1419 vcpu_thash(current, ifa, &PSCB(current, iha));
1422 IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
1424 vcpu_force_tlb_miss(vcpu, ifa);
1425 return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
1426 IA64_ALT_INST_TLB_VECTOR;
1429 IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
1431 vcpu_force_tlb_miss(vcpu, ifa);
1432 return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
1433 IA64_ALT_DATA_TLB_VECTOR;
1436 IA64FAULT vcpu_rfi(VCPU * vcpu)
1438 u64 ifs;
1439 REGS *regs = vcpu_regs(vcpu);
1441 vcpu_set_psr(vcpu, PSCB(vcpu, ipsr));
1443 ifs = PSCB(vcpu, ifs);
1444 if (ifs & 0x8000000000000000UL)
1445 regs->cr_ifs = ifs;
1447 regs->cr_iip = PSCB(vcpu, iip);
1449 return IA64_NO_FAULT;
1452 IA64FAULT vcpu_cover(VCPU * vcpu)
1454 // TODO: Only allowed for current vcpu
1455 REGS *regs = vcpu_regs(vcpu);
1457 if (!PSCB(vcpu, interrupt_collection_enabled)) {
1458 PSCB(vcpu, ifs) = regs->cr_ifs;
1460 regs->cr_ifs = 0;
1461 return IA64_NO_FAULT;
1464 IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
1465 {
1466 u64 pta = PSCB(vcpu, pta);
1467 u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1468 u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
1469 u64 Mask = (1L << pta_sz) - 1;
1470 u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1471 u64 compMask_60_15 = ~Mask_60_15;
1472 u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
1473 u64 VHPT_offset = (vadr >> rr_ps) << 3;
1474 u64 VHPT_addr1 = vadr & 0xe000000000000000L;
1475 u64 VHPT_addr2a =
1476 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1477 u64 VHPT_addr2b =
1478 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1479 u64 VHPT_addr3 = VHPT_offset & 0x7fff;
1480 u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1481 VHPT_addr3;
1483 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1484 *pval = VHPT_addr;
1485 return IA64_NO_FAULT;
1486 }
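/* Worked example for the short-format hash above (a sketch, assuming the
 * minimum pta.size of 15): Mask is 0x7fff, so Mask_60_15 == 0 and
 * compMask_60_15 covers all of bits 60:15. VHPT_addr then reduces to
 *   (vadr & 0xe000000000000000)          region bits of the faulting address
 * | (pta_base bits 60:15)                a single 32KB table at pta.base
 * | (((vadr >> rr.ps) << 3) & 0x7fff)    8-byte entry offset into the table
 * i.e. effectively one 32KB VHPT window per region. */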
1488 IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
1490 printk("vcpu_ttag: ttag instruction unsupported\n");
1491 return IA64_ILLOP_FAULT;
1494 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1496 /* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlaps. */
1497 static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
1499 return (b1 <= e2) && (e1 >= b2);
1502 /* Crash the domain if [base, base + page_size] and the Xen virtual space overlap.
1503 Note: LSBs of base inside page_size are ignored. */
1504 static inline void
1505 check_xen_space_overlap(const char *func, u64 base, u64 page_size)
1507 /* Overlaps can occur only in region 7.
1508 (This is an optimization to bypass all the checks). */
1509 if (REGION_NUMBER(base) != 7)
1510 return;
1512 /* Mask LSBs of base. */
1513 base &= ~(page_size - 1);
1515 /* FIXME: ideally an MCA should be generated... */
1516 if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
1517 base, base + page_size)
1518 || range_overlap(current->domain->arch.shared_info_va,
1519 current->domain->arch.shared_info_va
1520 + XSI_SIZE + XMAPPEDREGS_SIZE,
1521 base, base + page_size))
1522 panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
1523 func, base);
1526 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
1527 static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
1528 u64 rid)
1530 return trp->rid == rid
1531 && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
1534 static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
1536 return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
1539 static inline int
1540 vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
1542 return trp->rid == rid
1543 && trp->pte.p
1544 && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
1548 static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
1549 BOOLEAN is_data)
1551 unsigned char *regions;
1552 TR_ENTRY *trp;
1553 int tr_max;
1554 int i;
1556 if (is_data) {
1557 // data
1558 regions = &vcpu->arch.dtr_regions;
1559 trp = vcpu->arch.dtrs;
1560 tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
1561 } else {
1562 // instruction
1563 regions = &vcpu->arch.itr_regions;
1564 trp = vcpu->arch.itrs;
1565 tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
1568 if (!vcpu_quick_region_check(*regions, va)) {
1569 return NULL;
1571 for (i = 0; i < tr_max; i++, trp++) {
1572 if (vcpu_match_tr_entry(trp, va, rid)) {
1573 return trp;
1576 return NULL;
1579 // return value
1580 // 0: failure
1581 // 1: success
1582 int
1583 vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
1584 IA64_BUNDLE * bundle)
1586 u64 gpip; // guest pseudo-physical ip
1587 unsigned long vaddr;
1588 struct page_info *page;
1590 again:
1591 #if 0
1592 // Currently xen doesn't track psr.it bits.
1593 // it assumes always psr.it = 1.
1594 if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
1595 gpip = gip;
1596 } else
1597 #endif
1599 unsigned long region = REGION_NUMBER(gip);
1600 unsigned long rr = PSCB(vcpu, rrs)[region];
1601 unsigned long rid = rr & RR_RID_MASK;
1602 BOOLEAN swap_rr0;
1603 TR_ENTRY *trp;
1605 // vcpu->arch.{i, d}tlb are volatile,
1606 // copy its value to the variable, tr, before use.
1607 TR_ENTRY tr;
1609 trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
1610 if (trp != NULL) {
1611 tr = *trp;
1612 goto found;
1614 // When fetching the bundle fails, an itlb miss is reflected to the guest.
1615 // The last itc.i value is cached in PSCBX(vcpu, itlb).
1616 tr = PSCBX(vcpu, itlb);
1617 if (vcpu_match_tr_entry(&tr, gip, rid)) {
1618 //dprintk(XENLOG_WARNING,
1619 // "%s gip 0x%lx gpip 0x%lx\n", __func__,
1620 // gip, gpip);
1621 goto found;
1623 trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
1624 if (trp != NULL) {
1625 tr = *trp;
1626 goto found;
1628 #if 0
1629 tr = PSCBX(vcpu, dtlb);
1630 if (vcpu_match_tr_entry(&tr, gip, rid)) {
1631 goto found;
1633 #endif
1635 // try to access gip with guest virtual address
1636 // This may cause tlb miss. see vcpu_translate(). Be careful!
1637 swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
1638 if (swap_rr0) {
1639 set_virtual_rr0();
1641 *bundle = __get_domain_bundle(gip);
1642 if (swap_rr0) {
1643 set_metaphysical_rr0();
1645 if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
1646 dprintk(XENLOG_INFO, "%s gip 0x%lx\n", __func__, gip);
1647 return 0;
1649 return 1;
1651 found:
1652 gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
1653 (gip & ((1 << tr.ps) - 1));
1656 vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
1657 page = virt_to_page(vaddr);
1658 if (get_page(page, vcpu->domain) == 0) {
1659 if (page_get_owner(page) != vcpu->domain) {
1660 // This page might be a page granted by another
1661 // domain.
1662 panic_domain(regs, "domain tries to execute foreign "
1663 "domain page which might be mapped by "
1664 "grant table.\n");
1666 goto again;
1668 *bundle = *((IA64_BUNDLE *) vaddr);
1669 put_page(page);
1670 return 1;
1673 IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
1674 u64 * pteval, u64 * itir, u64 * iha)
1676 unsigned long region = address >> 61;
1677 unsigned long pta, rid, rr, key = 0;
1678 union pte_flags pte;
1679 TR_ENTRY *trp;
1681 if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
1682 // dom0 may generate an uncacheable physical address (msb=1)
1683 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1684 // FIXME: This seems to happen even though it shouldn't. Need to track
1685 // this down, but since it has been apparently harmless, just flag it for now
1686 // panic_domain(vcpu_regs(vcpu),
1688 /*
1689 * The guest may execute itc.d and rfi with psr.dt=0.
1690 * When the VMM then tries to fetch the opcode, a tlb miss may happen.
1691 * At that point PSCB(vcpu,metaphysical_mode)=1 and
1692 * region=5, and the VMM needs to handle this tlb miss as if
1693 * PSCB(vcpu,metaphysical_mode)=0.
1694 */
1695 printk("vcpu_translate: bad physical address: 0x%lx "
1696 "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);
1698 } else {
1699 *pteval = (address & _PAGE_PPN_MASK) |
1700 __DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
1701 *itir = vcpu->arch.vhpt_pg_shift << 2;
1702 perfc_incr(phys_translate);
1703 return IA64_NO_FAULT;
1705 } else if (!region && warn_region0_address) {
1706 REGS *regs = vcpu_regs(vcpu);
1707 unsigned long viip = PSCB(vcpu, iip);
1708 unsigned long vipsr = PSCB(vcpu, ipsr);
1709 unsigned long iip = regs->cr_iip;
1710 unsigned long ipsr = regs->cr_ipsr;
1711 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
1712 "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
1713 address, viip, vipsr, iip, ipsr);
1716 rr = PSCB(vcpu, rrs)[region];
1717 rid = rr & RR_RID_MASK;
1718 if (is_data) {
1719 trp = vcpu_tr_lookup(vcpu, address, rid, 1);
1720 if (trp != NULL) {
1721 *pteval = trp->pte.val;
1722 *itir = trp->itir;
1723 perfc_incr(tr_translate);
1724 return IA64_NO_FAULT;
1727 // FIXME?: check itr's for data accesses too, else bad things happen?
1728 /* else */ {
1729 trp = vcpu_tr_lookup(vcpu, address, rid, 0);
1730 if (trp != NULL) {
1731 *pteval = trp->pte.val;
1732 *itir = trp->itir;
1733 perfc_incr(tr_translate);
1734 return IA64_NO_FAULT;
1738 /* check 1-entry TLB */
1739 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1740 trp = &vcpu->arch.dtlb;
1741 pte = trp->pte;
1742 if ( /* is_data && */ pte.p
1743 && vcpu_match_tr_entry_no_p(trp, address, rid)) {
1744 *pteval = pte.val;
1745 *itir = trp->itir;
1746 perfc_incr(dtlb_translate);
1747 return IA64_USE_TLB;
1750 /* check guest VHPT */
1751 pta = PSCB(vcpu, pta);
1753 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1754 // note: architecturally, iha is optionally set for alt faults but
1755 // xenlinux depends on it so should document it as part of PV interface
1756 vcpu_thash(vcpu, address, iha);
1757 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
1758 REGS *regs = vcpu_regs(vcpu);
1759 struct opt_feature* optf = &(vcpu->domain->arch.opt_feature);
1761 /* Optimization for identity mapped region 7 OS (linux) */
1762 if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7 &&
1763 region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
1764 pte.val = address & _PAGE_PPN_MASK;
1765 pte.val = pte.val | optf->im_reg7.pgprot;
1766 key = optf->im_reg7.key;
1767 goto out;
1769 return is_data ? IA64_ALT_DATA_TLB_VECTOR :
1770 IA64_ALT_INST_TLB_VECTOR;
1773 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1774 /*
1775 * minimal support: vhpt walker is really dumb and won't find
1776 * anything
1777 */
1778 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1780 /* avoid recursively walking (short format) VHPT */
1781 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1782 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1784 if (!__access_ok(*iha)
1785 || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1786 // virtual VHPT walker "missed" in TLB
1787 return IA64_VHPT_FAULT;
1789 /*
1790 * Optimisation: this VHPT walker aborts on not-present pages
1791 * instead of inserting a not-present translation; this allows
1792 * vectoring directly to the miss handler.
1793 */
1794 if (!pte.p)
1795 return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
1797 /* found mapping in guest VHPT! */
1798 out:
1799 *itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
1800 *pteval = pte.val;
1801 perfc_incr(vhpt_translate);
1802 return IA64_NO_FAULT;
1805 IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
1807 u64 pteval, itir, mask, iha;
1808 IA64FAULT fault;
1810 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
1811 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
1812 mask = itir_mask(itir);
1813 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1814 return IA64_NO_FAULT;
1816 return vcpu_force_data_miss(vcpu, vadr);
1819 IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
1821 u64 pteval, itir, iha;
1822 IA64FAULT fault;
1824 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
1825 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
1826 *key = itir & IA64_ITIR_KEY_MASK;
1827 else
1828 *key = 1;
1830 return IA64_NO_FAULT;
1833 /**************************************************************************
1834 VCPU debug breakpoint register access routines
1835 **************************************************************************/
1837 IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
1839 if (reg >= IA64_NUM_DBG_REGS)
1840 return IA64_RSVDREG_FAULT;
1841 if ((reg & 1) == 0) {
1842 /* Validate address. */
1843 if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
1844 return IA64_ILLOP_FAULT;
1845 } else {
1846 if (!VMX_DOMAIN(vcpu)) {
1847 /* Mask PL0. */
1848 val &= ~(1UL << 56);
1851 if (val != 0)
1852 vcpu->arch.dbg_used |= (1 << reg);
1853 else
1854 vcpu->arch.dbg_used &= ~(1 << reg);
1855 vcpu->arch.dbr[reg] = val;
1856 if (vcpu == current)
1857 ia64_set_dbr(reg, val);
1858 return IA64_NO_FAULT;
1861 IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
1863 if (reg >= IA64_NUM_DBG_REGS)
1864 return IA64_RSVDREG_FAULT;
1865 if ((reg & 1) == 0) {
1866 /* Validate address. */
1867 if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
1868 return IA64_ILLOP_FAULT;
1869 } else {
1870 if (!VMX_DOMAIN(vcpu)) {
1871 /* Mask PL0. */
1872 val &= ~(1UL << 56);
1875 if (val != 0)
1876 vcpu->arch.dbg_used |= (1 << (reg + IA64_NUM_DBG_REGS));
1877 else
1878 vcpu->arch.dbg_used &= ~(1 << (reg + IA64_NUM_DBG_REGS));
1879 vcpu->arch.ibr[reg] = val;
1880 if (vcpu == current)
1881 ia64_set_ibr(reg, val);
1882 return IA64_NO_FAULT;
1885 IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
1887 if (reg >= IA64_NUM_DBG_REGS)
1888 return IA64_RSVDREG_FAULT;
1889 *pval = vcpu->arch.dbr[reg];
1890 return IA64_NO_FAULT;
1893 IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
1895 if (reg >= IA64_NUM_DBG_REGS)
1896 return IA64_RSVDREG_FAULT;
1897 *pval = vcpu->arch.ibr[reg];
1898 return IA64_NO_FAULT;
1901 /**************************************************************************
1902 VCPU performance monitor register access routines
1903 **************************************************************************/
1905 IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
1907 // TODO: Should set Logical CPU state, not just physical
1908 // NOTE: Writes to unimplemented PMC registers are discarded
1909 #ifdef DEBUG_PFMON
1910 printk("vcpu_set_pmc(%x,%lx)\n", reg, val);
1911 #endif
1912 ia64_set_pmc(reg, val);
1913 return IA64_NO_FAULT;
1916 IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
1918 // TODO: Should set Logical CPU state, not just physical
1919 // NOTE: Writes to unimplemented PMD registers are discarded
1920 #ifdef DEBUG_PFMON
1921 printk("vcpu_set_pmd(%x,%lx)\n", reg, val);
1922 #endif
1923 ia64_set_pmd(reg, val);
1924 return IA64_NO_FAULT;
1927 IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
1929 // NOTE: Reads from unimplemented PMC registers return zero
1930 u64 val = (u64) ia64_get_pmc(reg);
1931 #ifdef DEBUG_PFMON
1932 printk("%lx=vcpu_get_pmc(%x)\n", val, reg);
1933 #endif
1934 *pval = val;
1935 return IA64_NO_FAULT;
1938 IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
1940 // NOTE: Reads from unimplemented PMD registers return zero
1941 u64 val = (u64) ia64_get_pmd(reg);
1942 #ifdef DEBUG_PFMON
1943 printk("%lx=vcpu_get_pmd(%x)\n", val, reg);
1944 #endif
1945 *pval = val;
1946 return IA64_NO_FAULT;
1949 /**************************************************************************
1950 VCPU banked general register access routines
1951 **************************************************************************/
1952 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1953 do{ \
1954 __asm__ __volatile__ ( \
1955 ";;extr.u %0 = %3,%6,16;;\n" \
1956 "dep %1 = %0, %1, 0, 16;;\n" \
1957 "st8 [%4] = %1\n" \
1958 "extr.u %0 = %2, 16, 16;;\n" \
1959 "dep %3 = %0, %3, %6, 16;;\n" \
1960 "st8 [%5] = %3\n" \
1961 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1962 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1963 }while(0)
1965 IA64FAULT vcpu_bsw0(VCPU * vcpu)
1967 // TODO: Only allowed for current vcpu
1968 REGS *regs = vcpu_regs(vcpu);
1969 unsigned long *r = &regs->r16;
1970 unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
1971 unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
1972 unsigned long *runat = &regs->eml_unat;
1973 unsigned long *b0unat = &PSCB(vcpu, vbnat);
1974 unsigned long *b1unat = &PSCB(vcpu, vnat);
1976 unsigned long i;
1978 if (VMX_DOMAIN(vcpu)) {
1979 if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1980 for (i = 0; i < 16; i++) {
1981 *b1++ = *r;
1982 *r++ = *b0++;
1984 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1985 IA64_PT_REGS_R16_SLOT);
1986 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1988 } else {
1989 if (PSCB(vcpu, banknum)) {
1990 for (i = 0; i < 16; i++) {
1991 *b1++ = *r;
1992 *r++ = *b0++;
1994 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1995 IA64_PT_REGS_R16_SLOT);
1996 PSCB(vcpu, banknum) = 0;
1999 return IA64_NO_FAULT;
2002 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT) \
2003 do { \
2004 __asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n" \
2005 "dep %1 = %0, %1, 16, 16;;\n" \
2006 "st8 [%4] = %1\n" \
2007 "extr.u %0 = %2, 0, 16;;\n" \
2008 "dep %3 = %0, %3, %6, 16;;\n" \
2009 "st8 [%5] = %3\n" \
2010 ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
2011 "r"(*runat), "r"(b0unat), "r"(runat), \
2012 "i"(IA64_PT_REGS_R16_SLOT): "memory"); \
2013 } while(0)
2015 IA64FAULT vcpu_bsw1(VCPU * vcpu)
2017 // TODO: Only allowed for current vcpu
2018 REGS *regs = vcpu_regs(vcpu);
2019 unsigned long *r = &regs->r16;
2020 unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
2021 unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
2022 unsigned long *runat = &regs->eml_unat;
2023 unsigned long *b0unat = &PSCB(vcpu, vbnat);
2024 unsigned long *b1unat = &PSCB(vcpu, vnat);
2026 unsigned long i;
2028 if (VMX_DOMAIN(vcpu)) {
2029 if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
2030 for (i = 0; i < 16; i++) {
2031 *b0++ = *r;
2032 *r++ = *b1++;
2034 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
2035 IA64_PT_REGS_R16_SLOT);
2036 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
2038 } else {
2039 if (!PSCB(vcpu, banknum)) {
2040 for (i = 0; i < 16; i++) {
2041 *b0++ = *r;
2042 *r++ = *b1++;
2044 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
2045 IA64_PT_REGS_R16_SLOT);
2046 PSCB(vcpu, banknum) = 1;
2049 return IA64_NO_FAULT;
2052 /**************************************************************************
2053 VCPU cpuid access routines
2054 **************************************************************************/
2056 IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
2058 // FIXME: This could get called as a result of a rsvd-reg fault
2059 // if reg > 3
2060 switch (reg) {
2061 case 0:
2062 memcpy(pval, "Xen/ia64", 8);
2063 break;
2064 case 1:
2065 *pval = 0;
2066 break;
2067 case 2:
2068 *pval = 0;
2069 break;
2070 case 3:
2071 *pval = ia64_get_cpuid(3);
2072 break;
2073 case 4:
2074 *pval = ia64_get_cpuid(4);
2075 break;
2076 default:
2077 if (reg > (ia64_get_cpuid(3) & 0xff))
2078 return IA64_RSVDREG_FAULT;
2079 *pval = ia64_get_cpuid(reg);
2080 break;
2081 }
2082 return IA64_NO_FAULT;
2083 }
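/*
 * Architecturally, CPUID registers 0 and 1 together hold a 16-byte ASCII
 * vendor string, so a guest that assembles them through this interface sees
 * something like the following (an illustrative sketch only; the variable
 * names are made up for the example):
 *
 *	char vendor[17];
 *	u64 lo, hi;
 *	vcpu_get_cpuid(vcpu, 0, &lo);	// "Xen/ia64"
 *	vcpu_get_cpuid(vcpu, 1, &hi);	// 0, i.e. NUL padding
 *	memcpy(vendor, &lo, 8);
 *	memcpy(vendor + 8, &hi, 8);
 *	vendor[16] = '\0';		// yields "Xen/ia64"
 */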
2085 /**************************************************************************
2086 VCPU region register access routines
2087 **************************************************************************/
2089 unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
2090 {
2091 ia64_rr rr;
2093 rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
2094 return rr.ve;
2095 }
2097 IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
2098 {
2099 if (unlikely(is_reserved_rr_field(vcpu, val))) {
2100 gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
2101 return IA64_RSVDREG_FAULT;
2102 }
2104 PSCB(vcpu, rrs)[reg >> 61] = val;
2105 if (likely(vcpu == current)) {
2106 int rc = set_one_rr(reg, val);
2107 BUG_ON(rc == 0);
2108 }
2109 return IA64_NO_FAULT;
2110 }
2112 IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
2113 {
2114 if (VMX_DOMAIN(vcpu))
2115 *pval = VMX(vcpu, vrr[reg >> 61]);
2116 else
2117 *pval = PSCB(vcpu, rrs)[reg >> 61];
2119 return IA64_NO_FAULT;
2120 }
2122 IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
2123 u64 val3, u64 val4)
2124 {
2125 u64 reg0 = 0x0000000000000000UL;
2126 u64 reg1 = 0x2000000000000000UL;
2127 u64 reg2 = 0x4000000000000000UL;
2128 u64 reg3 = 0x6000000000000000UL;
2129 u64 reg4 = 0x8000000000000000UL;
2131 if (unlikely(is_reserved_rr_field(vcpu, val0) ||
2132 is_reserved_rr_field(vcpu, val1) ||
2133 is_reserved_rr_field(vcpu, val2) ||
2134 is_reserved_rr_field(vcpu, val3) ||
2135 is_reserved_rr_field(vcpu, val4))) {
2136 gdprintk(XENLOG_DEBUG,
2137 "use of invalid rrval %lx %lx %lx %lx %lx\n",
2138 val0, val1, val2, val3, val4);
2139 return IA64_RSVDREG_FAULT;
2140 }
2142 PSCB(vcpu, rrs)[reg0 >> 61] = val0;
2143 PSCB(vcpu, rrs)[reg1 >> 61] = val1;
2144 PSCB(vcpu, rrs)[reg2 >> 61] = val2;
2145 PSCB(vcpu, rrs)[reg3 >> 61] = val3;
2146 PSCB(vcpu, rrs)[reg4 >> 61] = val4;
2147 if (likely(vcpu == current)) {
2148 int rc;
2149 rc = !set_one_rr(reg0, val0);
2150 rc |= !set_one_rr(reg1, val1);
2151 rc |= !set_one_rr(reg2, val2);
2152 rc |= !set_one_rr(reg3, val3);
2153 rc |= !set_one_rr(reg4, val4);
2154 BUG_ON(rc != 0);
2155 }
2156 return IA64_NO_FAULT;
2157 }
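/*
 * For reference: the region register index is simply the top three bits of
 * the virtual address, so the fixed bases used above map as
 *   0x0000000000000000 >> 61 == 0 (rr0), 0x2000000000000000 >> 61 == 1 (rr1),
 *   0x4000000000000000 >> 61 == 2 (rr2), 0x6000000000000000 >> 61 == 3 (rr3),
 *   0x8000000000000000 >> 61 == 4 (rr4).
 * The same "vadr >> 61" indexing is used by vcpu_get_rr_ve() and
 * vcpu_get_rr() above.
 */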
2159 /**************************************************************************
2160 VCPU protection key register access routines
2161 **************************************************************************/
2163 IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
2164 {
2165 if (reg >= XEN_IA64_NPKRS)
2166 return IA64_RSVDREG_FAULT; /* register index too large */
2168 *pval = (u64) PSCBX(vcpu, pkrs[reg]);
2169 return IA64_NO_FAULT;
2170 }
2172 IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
2173 {
2174 ia64_pkr_t pkr_new;
2176 if (reg >= XEN_IA64_NPKRS)
2177 return IA64_RSVDREG_FAULT; /* index too large */
2179 pkr_new.val = val;
2180 if (pkr_new.reserved1)
2181 return IA64_RSVDREG_FAULT; /* reserved field */
2183 if (pkr_new.reserved2)
2184 return IA64_RSVDREG_FAULT; /* reserved field */
2186 PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
2187 ia64_set_pkr(reg, pkr_new.val);
2189 return IA64_NO_FAULT;
2190 }
2192 /**************************************************************************
2193 VCPU translation register access routines
2194 **************************************************************************/
2196 static void
2197 vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
2198 u64 itir, u64 ifa, u64 rid)
2199 {
2200 u64 ps;
2201 union pte_flags new_pte;
2203 trp->itir = itir;
2204 trp->rid = rid;
2205 ps = trp->ps;
2206 new_pte.val = pte;
2207 if (new_pte.pl < CONFIG_CPL0_EMUL)
2208 new_pte.pl = CONFIG_CPL0_EMUL;
2209 trp->vadr = ifa & ~0xfff;
2210 if (ps > 12) { // "ignore" relevant low-order bits
2211 new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
2212 trp->vadr &= ~((1UL << ps) - 1);
2213 }
2215 /* Atomic write. */
2216 trp->pte.val = new_pte.val;
2217 }
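/*
 * Worked example of the ps > 12 masking above: for a 64KB translation
 * (ps == 16), new_pte.ppn &= ~((1UL << 4) - 1) clears the low four ppn bits
 * (ppn is in 4KB units), and trp->vadr &= ~((1UL << 16) - 1) aligns the
 * virtual address down to a 64KB boundary, so the stored entry describes the
 * whole 64KB page rather than just the faulting 4KB region.
 */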
2219 static inline void
2220 vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
2221 {
2222 vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
2223 VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
2224 }
2226 IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
2227 u64 itir, u64 ifa)
2228 {
2229 TR_ENTRY *trp;
2231 if (slot >= NDTRS)
2232 return IA64_RSVDREG_FAULT;
2234 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2236 trp = &PSCBX(vcpu, dtrs[slot]);
2237 //printk("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
2238 vcpu_set_tr_entry(trp, pte, itir, ifa);
2239 vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);
2241 /*
2242 * FIXME: According to the spec, the vhpt should be purged here, but
2243 * doing so incurs a considerable performance loss.  Since it is safe
2244 * for Linux not to purge the vhpt, the vhpt purge is disabled until a
2245 * feasible way is found.
2246 *
2247 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
2248 */
2250 return IA64_NO_FAULT;
2251 }
2253 IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
2254 u64 itir, u64 ifa)
2255 {
2256 TR_ENTRY *trp;
2258 if (slot >= NITRS)
2259 return IA64_RSVDREG_FAULT;
2261 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2263 trp = &PSCBX(vcpu, itrs[slot]);
2264 //printk("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
2265 vcpu_set_tr_entry(trp, pte, itir, ifa);
2266 vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);
2268 /*
2269 * FIXME: According to the spec, the vhpt should be purged here, but
2270 * doing so incurs a considerable performance loss.  Since it is safe
2271 * for Linux not to purge the vhpt, the vhpt purge is disabled until a
2272 * feasible way is found.
2273 *
2274 * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
2275 */
2277 return IA64_NO_FAULT;
2278 }
2280 IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
2281 u64 itir, u64 ifa, u64 rid)
2282 {
2283 TR_ENTRY *trp;
2285 if (slot >= NITRS)
2286 return IA64_RSVDREG_FAULT;
2287 trp = &PSCBX(vcpu, itrs[slot]);
2288 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
2290 /* Recompute the itr_region. */
2291 vcpu->arch.itr_regions = 0;
2292 for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++)
2293 if (trp->pte.p)
2294 vcpu_quick_region_set(vcpu->arch.itr_regions,
2295 trp->vadr);
2296 return IA64_NO_FAULT;
2297 }
2299 IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
2300 u64 itir, u64 ifa, u64 rid)
2301 {
2302 TR_ENTRY *trp;
2304 if (slot >= NDTRS)
2305 return IA64_RSVDREG_FAULT;
2306 trp = &PSCBX(vcpu, dtrs[slot]);
2307 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
2309 /* Recompute the dtr_region. */
2310 vcpu->arch.dtr_regions = 0;
2311 for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++)
2312 if (trp->pte.p)
2313 vcpu_quick_region_set(vcpu->arch.dtr_regions,
2314 trp->vadr);
2315 return IA64_NO_FAULT;
2316 }
2318 /**************************************************************************
2319 VCPU translation cache access routines
2320 **************************************************************************/
2322 static void
2323 vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
2324 {
2325 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
2326 printk("vhpt rebuild: using page_shift %d\n", (int)ps);
2327 vcpu->arch.vhpt_pg_shift = ps;
2328 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2329 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2330 local_vhpt_flush();
2331 load_region_regs(vcpu);
2332 #else
2333 panic_domain(NULL, "domain trying to use smaller page size!\n");
2334 #endif
2335 }
2337 void
2338 vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
2339 u64 mp_pte, u64 itir, struct p2m_entry *entry)
2340 {
2341 ia64_itir_t _itir = {.itir = itir};
2342 unsigned long psr;
2344 check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
2346 // FIXME, must be inlined or potential for nested fault here!
2347 if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
2348 panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
2349 "smaller page size!\n");
2351 BUG_ON(_itir.ps > PAGE_SHIFT);
2352 vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
2353 psr = ia64_clear_ic();
2354 pte &= ~(_PAGE_RV2 | _PAGE_RV1); // Mask out the reserved bits.
2355 // FIXME: look for bigger mappings
2356 ia64_itc(IorD, vaddr, pte, _itir.itir);
2357 ia64_set_psr(psr);
2358 // ia64_srlz_i(); // no srlz req'd, will rfi later
2359 if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
2360 // FIXME: this is dangerous... vhpt_flush_address ensures these
2361 // addresses never get flushed. More work needed if this
2362 // ever happens.
2363 //printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
2364 if (_itir.ps > vcpu->arch.vhpt_pg_shift)
2365 vhpt_multiple_insert(vaddr, pte, _itir.itir);
2366 else
2367 vhpt_insert(vaddr, pte, _itir.itir);
2368 }
2369 // even if domain pagesize is larger than PAGE_SIZE, just put
2370 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
2371 else {
2372 vhpt_insert(vaddr, pte, _itir.itir);
2373 }
2374 }
2376 IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
2377 {
2378 unsigned long pteval;
2379 BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
2380 struct p2m_entry entry;
2381 ia64_itir_t _itir = {.itir = itir};
2383 if (_itir.ps < vcpu->arch.vhpt_pg_shift)
2384 vcpu_rebuild_vhpt(vcpu, _itir.ps);
2386 again:
2387 //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
2388 pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
2389 if (!pteval)
2390 return IA64_ILLOP_FAULT;
2391 if (swap_rr0)
2392 set_virtual_rr0();
2393 vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
2394 if (swap_rr0)
2395 set_metaphysical_rr0();
2396 if (p2m_entry_retry(&entry)) {
2397 vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
2398 goto again;
2399 }
2400 vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
2401 return IA64_NO_FAULT;
2402 }
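/*
 * Both vcpu_itc_d() above and vcpu_itc_i() below appear to follow the same
 * pattern: translate the guest PTE through the p2m, insert the result with
 * vcpu_itc_no_srlz(), and then, if p2m_entry_retry() reports that the p2m
 * entry changed underneath us (e.g. due to a concurrent page flip), flush
 * the range that was just inserted and retry from "again:".  Only once the
 * insert is known to be consistent is the per-vcpu dtlb/itlb entry updated.
 */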
2404 IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
2405 {
2406 unsigned long pteval;
2407 BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
2408 struct p2m_entry entry;
2409 ia64_itir_t _itir = {.itir = itir};
2411 if (_itir.ps < vcpu->arch.vhpt_pg_shift)
2412 vcpu_rebuild_vhpt(vcpu, _itir.ps);
2414 again:
2415 //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
2416 pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
2417 if (!pteval)
2418 return IA64_ILLOP_FAULT;
2419 if (swap_rr0)
2420 set_virtual_rr0();
2421 vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
2422 if (swap_rr0)
2423 set_metaphysical_rr0();
2424 if (p2m_entry_retry(&entry)) {
2425 vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
2426 goto again;
2427 }
2428 vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
2429 return IA64_NO_FAULT;
2430 }
2432 IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
2433 {
2434 BUG_ON(vcpu != current);
2436 check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);
2438 /* Purge TC */
2439 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2440 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2442 /* Purge all tlb and vhpt */
2443 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2445 return IA64_NO_FAULT;
2446 }
2448 // At privlvl=0, fc performs no access rights or protection key checks, while
2449 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
2450 // read but no protection key check. Thus in order to avoid an unexpected
2451 // access rights fault, we have to translate the virtual address to a
2452 // physical address (possibly via a metaphysical address) and do the fc
2453 // on the physical address, which is guaranteed to flush the same cache line
2454 IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
2455 {
2456 // TODO: Only allowed for current vcpu
2457 u64 mpaddr, paddr;
2458 IA64FAULT fault;
2460 again:
2461 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
2462 if (fault == IA64_NO_FAULT) {
2463 struct p2m_entry entry;
2464 paddr = translate_domain_mpaddr(mpaddr, &entry);
2465 ia64_fc(__va(paddr));
2466 if (p2m_entry_retry(&entry))
2467 goto again;
2468 }
2469 return fault;
2470 }
2472 IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
2473 {
2474 // Note that this only needs to be called once, i.e. the
2475 // architected loop to purge the entire TLB should use
2476 // base = stride1 = stride2 = 0, count0 = count1 = 1
2478 vcpu_flush_vtlb_all(current);
2480 return IA64_NO_FAULT;
2481 }
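/*
 * For illustration, a minimal sketch of the "architected loop" the comment
 * above refers to, roughly mirroring how Linux/ia64 walks the PAL_PTCE_INFO
 * parameters.  The function name and signature are made up for the example,
 * and it assumes the ia64_ptce()/ia64_srlz_i() intrinsics from the ia64
 * headers.  With base = 0, both strides 0 and both counts 1, it degenerates
 * to a single ptc.e, which is why vcpu_ptc_e() only needs to be reached once.
 */
static inline void ptc_e_architected_loop(unsigned long base,
                                          unsigned long count0,
                                          unsigned long count1,
                                          unsigned long stride0,
                                          unsigned long stride1)
{
	unsigned long i, j, addr = base;

	for (i = 0; i < count0; i++) {
		for (j = 0; j < count1; j++) {
			ia64_ptce(addr);	/* purge one translation cache region */
			addr += stride1;
		}
		addr += stride0;
	}
	ia64_srlz_i();	/* serialize so the purge is visible before returning */
}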
2483 IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
2484 {
2485 printk("vcpu_ptc_g: called, not implemented yet\n");
2486 return IA64_ILLOP_FAULT;
2487 }
2489 IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
2490 {
2491 // FIXME: validate not flushing Xen addresses
2492 // if (Xen address) return(IA64_ILLOP_FAULT);
2493 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
2494 //printk("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
2496 check_xen_space_overlap("ptc_ga", vadr, addr_range);
2498 domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);
2500 return IA64_NO_FAULT;
2501 }
2503 IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
2504 {
2505 unsigned long region = vadr >> 61;
2506 u64 addr_range = 1UL << log_range;
2507 unsigned long rid, rr;
2508 int i;
2509 TR_ENTRY *trp;
2511 BUG_ON(vcpu != current);
2512 check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);
2514 rr = PSCB(vcpu, rrs)[region];
2515 rid = rr & RR_RID_MASK;
2517 /* Purge TC */
2518 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
2520 /* Purge tr and recompute dtr_regions. */
2521 vcpu->arch.dtr_regions = 0;
2522 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
2523 if (vcpu_match_tr_entry_range
2524 (trp, rid, vadr, vadr + addr_range))
2525 vcpu_purge_tr_entry(trp);
2526 else if (trp->pte.p)
2527 vcpu_quick_region_set(vcpu->arch.dtr_regions,
2528 trp->vadr);
2530 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2532 return IA64_NO_FAULT;
2533 }
2535 IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
2536 {
2537 unsigned long region = vadr >> 61;
2538 u64 addr_range = 1UL << log_range;
2539 unsigned long rid, rr;
2540 int i;
2541 TR_ENTRY *trp;
2543 BUG_ON(vcpu != current);
2544 check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);
2546 rr = PSCB(vcpu, rrs)[region];
2547 rid = rr & RR_RID_MASK;
2549 /* Purge TC */
2550 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
2552 /* Purge tr and recompute itr_regions. */
2553 vcpu->arch.itr_regions = 0;
2554 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
2555 if (vcpu_match_tr_entry_range
2556 (trp, rid, vadr, vadr + addr_range))
2557 vcpu_purge_tr_entry(trp);
2558 else if (trp->pte.p)
2559 vcpu_quick_region_set(vcpu->arch.itr_regions,
2560 trp->vadr);
2562 vcpu_flush_tlb_vhpt_range(vadr, log_range);
2564 return IA64_NO_FAULT;