ia64/xen-unstable

xen/arch/ia64/xen/faults.c @ 15662:85c2f2d754ef

[IA64] Use key in optimization feature

Added IA64_INST_KEY_MISS_VECTOR and IA64_DATA_KEY_MISS_VECTOR to
ia64_handle_reflection, and use the protection key when handling
XEN_IA64_OPTF_IDENT_MAP_REG7 in PV domains.

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
author Alex Williamson <alex.williamson@hp.com>
date Mon Jul 30 16:10:17 2007 -0600 (2007-07-30)
parents cbf749e9961f
children 255abff9d1f7

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations be? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |  \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB |  \
                         IA64_PSR_CPL| IA64_PSR_MC | IA64_PSR_IS |  \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |  \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// should never panic domain... if it does, stack may have been overrun
static void check_bad_nested_interruption(unsigned long isr,
                                          struct pt_regs *regs,
                                          unsigned long vector)
{
        struct vcpu *v = current;

        if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
                panic_domain(regs,
                             "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
                panic_domain(regs, "psr.ic off, delivering fault=%lx,"
                             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
                             isr, PSCB(v, iip));
        }
}
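
/*
 * Deliver a fault or interruption to the guest: save the interruption
 * state (isr, iip, ipsr, ifs, ...) into the shared PSCB area, switch to
 * bank 0 registers, and redirect execution to the guest's handler at
 * iva + vector, with collection and event delivery masked.
 */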
static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                                 unsigned long vector)
{
        struct vcpu *v = current;

        if (!PSCB(v, interrupt_collection_enabled))
                check_bad_nested_interruption(isr, regs, vector);
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        perfc_incra(slow_reflect, vector >> 8);
}
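
/*
 * Deliver a pending event (virtual interrupt) to the guest by vectoring
 * to its registered event callback, using the same state-save sequence
 * as reflect_interruption().
 */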
void reflect_event(void)
{
        struct vcpu *v = current;
        struct pt_regs *regs;
        unsigned long isr;

        if (!event_pending(v))
                return;

        /* Sanity check */
        if (is_idle_vcpu(v)) {
                //printk("WARN: invocation to reflect_event in nested xen\n");
                return;
        }

        regs = vcpu_regs(v);

        isr = regs->cr_ipsr & IA64_PSR_RI;

        if (!PSCB(v, interrupt_collection_enabled))
                printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
                       "isr=%lx,viip=0x%lx\n",
                       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;
}
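
/*
 * Lazy cover: when interrupt collection is off, save cr.ifs into the
 * PSCB and clear it, so the faulting instruction can simply be retried
 * (see the "retry same instruction with cr.ifs off" exit below).
 */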
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
        if (!PSCB(v, interrupt_collection_enabled)) {
                PSCB(v, ifs) = regs->cr_ifs;
                regs->cr_ifs = 0;
                perfc_incr(lazy_cover);
                return 1;       // retry same instruction with cr.ifs off
        }
        return 0;
}
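
/*
 * TLB/VHPT miss handler: translate the faulting address through the
 * guest's virtual TLB, insert the corresponding machine translation,
 * or reflect the fault to the guest if no translation exists.
 */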
void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
        unsigned long iip = regs->cr_iip, iha;
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
        int is_ptc_l_needed = 0;
        u64 logps;

        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA)
                && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to a speculative load or lfetch.fault,
                 * set the "ed" bit in the psr to ensure forward progress.
                 * (Target register will get a NaT for ld.s, lfetch will be
                 * canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

 again:
        fault = vcpu_translate(current, address, is_data, &pteval,
                               &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                struct p2m_entry entry;
                unsigned long m_pteval;
                m_pteval = translate_domain_pte(pteval, address, itir,
                                                &logps, &entry);
                vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
                                 m_pteval, pteval, logps, &entry);
                if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
                    p2m_entry_retry(&entry)) {
                        /* dtlb has been purged in-between. This dtlb was
                           matching. Undo the work. */
                        vcpu_flush_tlb_vhpt_range(address, logps);

                        // the stale entry which we inserted above
                        // may remain in the TLB cache;
                        // we don't purge it now, hoping the next itc purges it.
                        is_ptc_l_needed = 1;
                        goto again;
                }
                return;
        }

        if (is_ptc_l_needed)
                vcpu_ptc_l(current, address, logps);
        if (!guest_mode(regs)) {
                /* The fault occurs inside Xen. */
                if (!ia64_done_with_exception(regs)) {
                        // should never happen. If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
                               " lookup failed, iip=0x%lx, addr=0x%lx, "
                               "spinning...\n", iip, address);
                        panic_domain(regs, "*** xen_handle_domain_access: "
                                     "exception table lookup failed, "
                                     "iip=0x%lx, addr=0x%lx, spinning...\n",
                                     iip, address);
                }
                return;
        }

        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
                return;

        if (!PSCB(current, interrupt_collection_enabled)) {
                check_bad_nested_interruption(isr, regs, fault);
                //printk("Delivering NESTED DATA TLB fault\n");
                fault = IA64_DATA_NESTED_TLB_VECTOR;
                regs->cr_iip =
                    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
                regs->cr_ipsr =
                    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
                regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
                                               IA64_PSR_CPL0_BIT);

                if (PSCB(current, hpsr_dfh))
                        regs->cr_ipsr |= IA64_PSR_DFH;
                PSCB(current, vpsr_dfh) = 0;
                perfc_incra(slow_reflect, fault >> 8);
                return;
        }

        PSCB(current, itir) = itir;
        PSCB(current, iha) = iha;
        PSCB(current, ifa) = address;
        reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void __init trap_init(void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a virtual address */
                fpswa_interface = __va(ia64_boot_param->fpswa);
        else
                printk("No FPSWA supported.\n");
}
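
/*
 * Invoke the EFI FPSWA (floating-point software assist) handler to
 * emulate the faulting/trapping FP instruction; only f6-f11 need to be
 * passed in, since the kernel uses just those registers.
 */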
static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return (fpswa_ret_t) {-1, 0, 0, 0};

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state. only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long trap_type,
         *      void *Bundle,
         *      unsigned long *pipsr,
         *      unsigned long *pfsr,
         *      unsigned long *pisr,
         *      unsigned long *ppreds,
         *      unsigned long *pifs,
         *      void *fp_state);
         */
        ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
                                         ipsr, fpsr, isr, pr, ifs, &fp_state);

        return ret;
}

/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        struct vcpu *v = current;
        IA64_BUNDLE bundle;
        unsigned long fault_ip;
        fpswa_ret_t ret;

        fault_ip = regs->cr_iip;
        /*
         * When an FP trap occurs, the trapping instruction has already
         * completed.  If ipsr.ri == 0, the trapping instruction is in the
         * previous bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;

        if (VMX_DOMAIN(current)) {
                if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
                        return IA64_RETRY;
        } else
                bundle = __get_domain_bundle(fault_ip);

        if (!bundle.i64[0] && !bundle.i64[1]) {
                printk("%s: floating-point bundle at 0x%lx not mapped\n",
                       __FUNCTION__, fault_ip);
                return -1;
        }

        ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                         &isr, &regs->pr, &regs->cr_ifs, regs);

        if (ret.status) {
                PSCBX(v, fpswa_ret) = ret;
                printk("%s(%s): fp_emulate() returned %ld\n",
                       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
        }

        return ret.status;
}
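
/*
 * Faults taken by Xen itself: print a description of the fault and
 * panic, except for a handled lfetch.fault or a benign hazard, which
 * just return.
 */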
void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
        struct pt_regs *regs = (struct pt_regs *)&stack;
        unsigned long code;
        static const char *const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6",
                "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10",
                "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
               "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
               regs->cr_iip, regs->cr_ipsr, isr);

        if ((isr & IA64_ISR_NA) &&
            ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the
                 * psr to cancel the lfetch.
                 */
                ia64_psr(regs)->ed = 1;
                printk("ia64_fault: handled lfetch.fault\n");
                return;
        }

        switch (vector) {
        case 0:
                printk("VHPT Translation.\n");
                break;

        case 4:
                printk("Alt DTLB.\n");
                break;

        case 6:
                printk("Instruction Key Miss.\n");
                break;

        case 7:
                printk("Data Key Miss.\n");
                break;

        case 8:
                printk("Dirty-bit.\n");
                break;

        case 20:
                printk("Page Not Found.\n");
                break;

        case 21:
                printk("Key Permission.\n");
                break;

        case 22:
                printk("Instruction Access Rights.\n");
                break;

        case 24:        /* General Exception */
                code = (isr >> 4) & 0xf;
                printk("General Exception: %s%s.\n", reason[code],
                       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                      " (data access)") : "");
                if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx "
                               "(pr = %016lx)\n", current->comm, current->pid,
                               regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
                        printk("ia64_fault: returning on hazard\n");
                        return;
                }
                break;

        case 25:
                printk("Disabled FP-Register.\n");
                break;

        case 26:
                printk("NaT consumption.\n");
                break;

        case 29:
                printk("Debug.\n");
                break;

        case 30:
                printk("Unaligned Reference.\n");
                break;

        case 31:
                printk("Unsupported data reference.\n");
                break;

        case 32:
                printk("Floating-Point Fault.\n");
                break;

        case 33:
                printk("Floating-Point Trap.\n");
                break;

        case 34:
                printk("Lower Privilege Transfer Trap.\n");
                break;

        case 35:
                printk("Taken Branch Trap.\n");
                break;

        case 36:
                printk("Single Step Trap.\n");
                break;

        case 45:
                printk("IA-32 Exception.\n");
                break;

        case 46:
                printk("IA-32 Intercept.\n");
                break;

        case 47:
                printk("IA-32 Interrupt.\n");
                break;

        default:
                printk("Fault %lu\n", vector);
                break;
        }

        show_registers(regs);
        panic("Fault in Xen.\n");
}

/* Also read in hyperprivop.S */
int first_break = 0;
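
/*
 * Break instruction handler: dispatch on the break immediate to the SSC
 * emulator, the debugger, the hypercall path (d->arch.breakimm), or the
 * fast hyperprivops; anything else is reflected to the guest as a break
 * fault.
 */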
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        IA64FAULT vector;

        /* FIXME: don't hardcode constant */
        if ((iim == 0x80001 || iim == 0x80002)
            && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                do_ssc(vcpu_get_gr(current, 36), regs);
        }
#ifdef CRASH_DEBUG
        else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
                if (iim == 0)
                        show_registers(regs);
                debugger_trap_fatal(0 /* don't care */ , regs);
        }
#endif
        else if (iim == d->arch.breakimm &&
                 ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;

                if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
                        if (!PSCBX(v, hypercall_continuation))
                                vcpu_increment_iip(current);
                } else
                        reflect_interruption(isr, regs, vector);
        } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
                   && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                if (ia64_hyperprivop(iim, regs))
                        vcpu_increment_iip(current);
        } else {
                if (iim == 0)
                        die_if_kernel("bug check", regs, iim);
                PSCB(v, iim) = iim;
                reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
        }
}
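
/*
 * Privileged operation handler: let priv_emulate() emulate the
 * instruction; if it returns a fault vector instead, reflect that
 * fault to the guest.
 */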
void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
        IA64FAULT vector;

        vector = priv_emulate(current, regs, isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector to reflect that requires
                // iha/itir (e.g. vcpu_force_data_miss), they must be set there
                /*
                 * IA64_GENEX_VECTOR may carry an ISR.code in its lowest byte;
                 * see IA64_ILLOP_FAULT, ...
                 */
                if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
                        isr = vector & 0xffUL;
                        vector = IA64_GENEX_VECTOR;
                }
                reflect_interruption(isr, regs, vector);
        }
}
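
/*
 * Translate a hardware fault/trap number into the guest's vector table
 * offset, handle the few cases Xen can resolve itself (disabled FP
 * registers, FP software assist, debugger traps, privified NaT faults),
 * and reflect the rest to the guest.
 */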
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
        struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;
        unsigned long status;

        /* The following faults should never be seen from Xen itself */
        BUG_ON(!(psr & IA64_PSR_CPL));

        switch (vector) {
        case 6:
                vector = IA64_INST_KEY_MISS_VECTOR;
                break;
        case 7:
                vector = IA64_DATA_KEY_MISS_VECTOR;
                break;
        case 8:
                vector = IA64_DIRTY_BIT_VECTOR;
                break;
        case 9:
                vector = IA64_INST_ACCESS_BIT_VECTOR;
                break;
        case 10:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_BIT_VECTOR;
                break;
        case 20:
                check_lazy_cover = 1;
                vector = IA64_PAGE_NOT_PRESENT_VECTOR;
                break;
        case 22:
                vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
                break;
        case 23:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
                break;
        case 24:
                vector = IA64_GENEX_VECTOR;
                break;
        case 25:
                if (PSCB(v, hpsr_dfh)) {
                        PSCB(v, hpsr_dfh) = 0;
                        PSCB(v, hpsr_mfh) = 1;
                        if (__ia64_per_cpu_var(fp_owner) != v)
                                __ia64_load_fpu(v->arch._thread.fph);
                }
                if (!PSCB(v, vpsr_dfh)) {
                        regs->cr_ipsr &= ~IA64_PSR_DFH;
                        return;
                }
                vector = IA64_DISABLED_FPREG_VECTOR;
                break;
        case 26:
                if (((isr >> 4L) & 0xfL) == 1) {
                        /* Fault is due to a register NaT consumption fault. */
                        //regs->eml_unat = 0; FIXME: DO WE NEED THIS??
                        printk("ia64_handle_reflection: handling regNaT "
                               "fault\n");
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#if 1
                // pass null pointer dereferences through with no error
                // but retain debug output for non-zero ifa
                if (!ifa) {
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#endif
#ifdef CONFIG_PRIVIFY
                /* Some privified operations are coded using reg+64 instead
                   of reg. */
                printk("*** NaT fault... attempting to handle as privop\n");
                printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0; FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v, regs, isr);
                if (vector == IA64_NO_FAULT) {
                        printk("*** Handled privop masquerading as NaT "
                               "fault\n");
                        return;
                }
#endif
                vector = IA64_NAT_CONSUMPTION_VECTOR;
                break;
        case 27:
                //printk("*** Handled speculation vector, itc=%lx!\n",
                //       ia64_get_itc());
                PSCB(current, iim) = iim;
                vector = IA64_SPECULATION_VECTOR;
                break;
        case 29:
                vector = IA64_DEBUG_VECTOR;
                if (debugger_trap_entry(vector, regs))
                        return;
                break;
        case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR;
                break;
        case 32:
                status = handle_fpu_swa(1, regs, isr);
                if (!status) {
                        vcpu_increment_iip(v);
                        return;
                }
                // failed to fetch the code bundle
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP fault\n");
                vector = IA64_FP_FAULT_VECTOR;
                break;
        case 33:
                status = handle_fpu_swa(0, regs, isr);
                if (!status)
                        return;
                // failed to fetch the code bundle
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP trap\n");
                vector = IA64_FP_TRAP_VECTOR;
                break;
        case 34:
                if (isr & (1UL << 4))
                        printk("ia64_handle_reflection: handling "
                               "unimplemented instruction address %s\n",
                               (isr & (1UL << 32)) ? "fault" : "trap");
                vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
                break;
        case 35:
                vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
                if (debugger_trap_entry(vector, regs))
                        return;
                break;
        case 36:
                vector = IA64_SINGLE_STEP_TRAP_VECTOR;
                if (debugger_trap_entry(vector, regs))
                        return;
                break;

        default:
                panic_domain(regs, "ia64_handle_reflection: "
                             "unhandled vector=0x%lx\n", vector);
                return;
        }
        if (check_lazy_cover && (isr & IA64_ISR_IR) &&
            handle_lazy_cover(v, regs))
                return;
        PSCB(current, ifa) = ifa;
        PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
        reflect_interruption(isr, regs, vector);
}

void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
        struct vcpu *v = current;
        struct domain *d = current->domain;
        unsigned long gpfn;
        unsigned long pte = 0;
        struct vhpt_lf_entry *vlfe;

        /* There are two jobs to do:
           - mark the page as dirty (the metaphysical address must be
             extracted to do that);
           - decide whether or not to reflect the fault (the virtual Dirty
             bit must be extracted to decide).
           Unfortunately, this information is not immediately available!
         */

        /* Extract the metaphysical address.
           Try to get it from VHPT and M2P as we need the flags. */
        vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
        pte = vlfe->page_flags;
        if (vlfe->ti_tag == ia64_ttag(ifa)) {
                /* The VHPT entry is valid. */
                gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
                BUG_ON(gpfn == INVALID_M2P_ENTRY);
        } else {
                unsigned long itir, iha;
                IA64FAULT fault;

                /* The VHPT entry is not valid. */
                vlfe = NULL;

                /* FIXME: gives a chance to tpa, as the TC was valid. */

                fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

                /* Try again! */
                if (fault != IA64_NO_FAULT) {
                        /* This will trigger a dtlb miss. */
                        ia64_ptcl(ifa, PAGE_SHIFT << 2);
                        return;
                }
                gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
                if (pte & _PAGE_D)
                        pte |= _PAGE_VIRT_D;
        }

        /* Set the dirty bit in the bitmap. */
        shadow_mark_page_dirty(d, gpfn);

        /* Update the local TC/VHPT and decide whether or not the fault
           should be reflected.
           SMP note: we almost ignore the other processors. The shadow_bitmap
           has been atomically updated. If the dirty fault happens on another
           processor, it will do its job.
         */

        if (pte != 0) {
                /* We will know how to handle the fault. */

                if (pte & _PAGE_VIRT_D) {
                        /* Rewrite VHPT entry.
                           There is no race here because only the
                           cpu VHPT owner can write page_flags. */
                        if (vlfe)
                                vlfe->page_flags = pte | _PAGE_D;

                        /* Purge the TC locally.
                           It will be reloaded from the VHPT iff the
                           VHPT entry is still valid. */
                        ia64_ptcl(ifa, PAGE_SHIFT << 2);

                        atomic64_inc(&d->arch.shadow_fault_count);
                } else {
                        /* Reflect.
                           In this case there is no need to purge. */
                        ia64_handle_reflection(ifa, regs, isr, 0, 8);
                }
        } else {
                /* We don't know whether or not the fault must be
                   reflected. The VHPT entry is not valid. */
                /* FIXME: in metaphysical mode, we could do an ITC now. */
                ia64_ptcl(ifa, PAGE_SHIFT << 2);
        }
}