ia64/xen-unstable

xen/arch/ia64/xen/faults.c @ 15374:0944634d4639

[IA64] Improve priv_emulate() isr.code handling for IA64_GENEX_VECTOR

Better handling of isr.code if priv_emulate() fails with IA64_GENEX_VECTOR.

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
author    Alex Williamson <alex.williamson@hp.com>
date      Mon Jun 18 13:41:31 2007 -0600
parents   3e170567505a
children  4d159746d0e6

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
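
/*
 * PSR bits forced on (DELIVER_PSR_SET) and forced off (DELIVER_PSR_CLR)
 * in regs->cr_ipsr whenever a fault or event is reflected into the guest
 * (see the macros below): the guest handler runs with translation and
 * interruption collection enabled, in bank 1, at the privilege level used
 * for guest kernels (apparently cpl 2, matching the ia64_get_cpl() checks
 * further down), with single-step, debug and other trap bits cleared.
 */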
#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// should never panic domain... if it does, stack may have been overrun
static void check_bad_nested_interruption(unsigned long isr,
                                          struct pt_regs *regs,
                                          unsigned long vector)
{
        struct vcpu *v = current;

        if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
                panic_domain(regs,
                             "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
                panic_domain(regs, "psr.ic off, delivering fault=%lx,"
                             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
                             isr, PSCB(v, iip));
        }
}
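
/*
 * Reflect a fault to the guest: save the interruption state (ipsr, isr,
 * iip, ifs, ...) into the vcpu's PSCB, switch to bank 0 registers, and
 * vector execution to the guest handler at iva + vector, with event
 * delivery masked and interruption collection disabled.
 */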
static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                                 unsigned long vector)
{
        struct vcpu *v = current;

        if (!PSCB(v, interrupt_collection_enabled))
                check_bad_nested_interruption(isr, regs, vector);
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        perfc_incra(slow_reflect, vector >> 8);
}
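
/*
 * Deliver a pending event-channel upcall to the guest.  Same state
 * save/PSR fixup as reflect_interruption(), except control goes to the
 * guest's registered event_callback_ip instead of an IVT entry.
 */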
void reflect_event(void)
{
        struct vcpu *v = current;
        struct pt_regs *regs;
        unsigned long isr;

        if (!event_pending(v))
                return;

        /* Sanity check */
        if (is_idle_vcpu(v)) {
                //printk("WARN: invocation to reflect_event in nested xen\n");
                return;
        }

        regs = vcpu_regs(v);

        isr = regs->cr_ipsr & IA64_PSR_RI;

        if (!PSCB(v, interrupt_collection_enabled))
                printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
                       "isr=%lx,viip=0x%lx\n",
                       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;
}
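
/*
 * "Lazy cover": if the guest faulted with interruption collection off,
 * record cr.ifs in the PSCB and clear it in the saved regs, then let the
 * faulting instruction be retried with cr.ifs off.  Returns 1 when the
 * retry should happen instead of reflecting the fault.
 */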
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
        if (!PSCB(v, interrupt_collection_enabled)) {
                PSCB(v, ifs) = regs->cr_ifs;
                regs->cr_ifs = 0;
                perfc_incr(lazy_cover);
                return 1;       // retry same instruction with cr.ifs off
        }
        return 0;
}
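
/*
 * TLB miss / page fault handler: translate the faulting address through
 * the guest's TLB and VHPT state and insert the corresponding machine
 * translation; otherwise fix up Xen-internal accesses via the exception
 * table, or reflect the fault to the guest.
 */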
void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
        unsigned long iip = regs->cr_iip, iha;
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
        int is_ptc_l_needed = 0;
        u64 logps;

        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA)
                && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to a speculative load or lfetch.fault,
                 * set the "ed" bit in the psr to ensure forward progress.
                 * (Target register will get a NaT for ld.s, lfetch will be
                 * canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

 again:
        fault = vcpu_translate(current, address, is_data, &pteval,
                               &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                struct p2m_entry entry;
                unsigned long m_pteval;
                m_pteval = translate_domain_pte(pteval, address, itir,
                                                &logps, &entry);
                vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
                                 m_pteval, pteval, logps, &entry);
                if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
                    p2m_entry_retry(&entry)) {
                        /* The dtlb was purged in-between and it was the
                           matching entry.  Undo the work. */
                        vcpu_flush_tlb_vhpt_range(address, logps);

                        // The stale entry we inserted above may remain in
                        // the TLB cache.  We don't purge it now, hoping the
                        // next itc purges it.
                        is_ptc_l_needed = 1;
                        goto again;
                }
                return;
        }

        if (is_ptc_l_needed)
                vcpu_ptc_l(current, address, logps);
        if (!guest_mode(regs)) {
                /* The fault occurs inside Xen. */
                if (!ia64_done_with_exception(regs)) {
                        // should never happen.  If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
                               " lookup failed, iip=0x%lx, addr=0x%lx, "
                               "spinning...\n", iip, address);
                        panic_domain(regs, "*** xen_handle_domain_access: "
                                     "exception table lookup failed, "
                                     "iip=0x%lx, addr=0x%lx, spinning...\n",
                                     iip, address);
                }
                return;
        }

        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
                return;

        if (!PSCB(current, interrupt_collection_enabled)) {
                check_bad_nested_interruption(isr, regs, fault);
                //printk("Delivering NESTED DATA TLB fault\n");
                fault = IA64_DATA_NESTED_TLB_VECTOR;
                regs->cr_iip =
                    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
                regs->cr_ipsr =
                    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;

                if (PSCB(current, hpsr_dfh))
                        regs->cr_ipsr |= IA64_PSR_DFH;
                PSCB(current, vpsr_dfh) = 0;
                perfc_incra(slow_reflect, fault >> 8);
                return;
        }

        PSCB(current, itir) = itir;
        PSCB(current, iha) = iha;
        PSCB(current, ifa) = address;
        reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void __init trap_init(void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a virtual address */
                fpswa_interface = __va(ia64_boot_param->fpswa);
        else
                printk("No FPSWA supported.\n");
}
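
/*
 * Call the EFI FPSWA (floating-point software assist) handler to emulate
 * the faulting or trapping FP operation.  Only f6-f11 are live in the
 * saved pt_regs, so only those registers are passed in fp_state.
 */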
static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return (fpswa_ret_t) {-1, 0, 0, 0};

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *)&regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long trap_type,
         *      void *Bundle,
         *      unsigned long *pipsr,
         *      unsigned long *pfsr,
         *      unsigned long *pisr,
         *      unsigned long *ppreds,
         *      unsigned long *pifs,
         *      void *fp_state);
         */
        ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
                                         ipsr, fpsr, isr, pr, ifs, &fp_state);

        return ret;
}

/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        struct vcpu *v = current;
        IA64_BUNDLE bundle;
        unsigned long fault_ip;
        fpswa_ret_t ret;

        fault_ip = regs->cr_iip;
        /*
         * By the time an FP trap is raised, the trapping instruction has
         * already completed.  If ipsr.ri == 0, the trapping instruction is
         * in the previous bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;

        if (VMX_DOMAIN(current)) {
                if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
                        return IA64_RETRY;
        } else
                bundle = __get_domain_bundle(fault_ip);

        if (!bundle.i64[0] && !bundle.i64[1]) {
                printk("%s: floating-point bundle at 0x%lx not mapped\n",
                       __FUNCTION__, fault_ip);
                return -1;
        }

        ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                         &isr, &regs->pr, &regs->cr_ifs, regs);

        if (ret.status) {
                PSCBX(v, fpswa_ret) = ret;
                printk("%s(%s): fp_emulate() returned %ld\n",
                       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
        }

        return ret.status;
}
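
/*
 * Handler of last resort for faults taken by Xen itself: print a
 * diagnostic for the vector, then dump registers and panic.  Only
 * lfetch.fault and hazards are handled and returned from.
 */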
void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
        struct pt_regs *regs = (struct pt_regs *)&stack;
        unsigned long code;
        static const char *const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6",
                "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10",
                "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
               "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
               regs->cr_iip, regs->cr_ipsr, isr);

        if ((isr & IA64_ISR_NA) &&
            ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the
                 * psr to cancel the lfetch.
                 */
                ia64_psr(regs)->ed = 1;
                printk("ia64_fault: handled lfetch.fault\n");
                return;
        }

        switch (vector) {
        case 0:
                printk("VHPT Translation.\n");
                break;

        case 4:
                printk("Alt DTLB.\n");
                break;

        case 6:
                printk("Instruction Key Miss.\n");
                break;

        case 7:
                printk("Data Key Miss.\n");
                break;

        case 8:
                printk("Dirty-bit.\n");
                break;

        case 20:
                printk("Page Not Found.\n");
                break;

        case 21:
                printk("Key Permission.\n");
                break;

        case 22:
                printk("Instruction Access Rights.\n");
                break;

        case 24:                /* General Exception */
                code = (isr >> 4) & 0xf;
                printk("General Exception: %s%s.\n", reason[code],
                       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                      " (data access)") : "");
                if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx "
                               "(pr = %016lx)\n", current->comm, current->pid,
                               regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
                        printk("ia64_fault: returning on hazard\n");
                        return;
                }
                break;

        case 25:
                printk("Disabled FP-Register.\n");
                break;

        case 26:
                printk("NaT consumption.\n");
                break;

        case 29:
                printk("Debug.\n");
                break;

        case 30:
                printk("Unaligned Reference.\n");
                break;

        case 31:
                printk("Unsupported data reference.\n");
                break;

        case 32:
                printk("Floating-Point Fault.\n");
                break;

        case 33:
                printk("Floating-Point Trap.\n");
                break;

        case 34:
                printk("Lower Privilege Transfer Trap.\n");
                break;

        case 35:
                printk("Taken Branch Trap.\n");
                break;

        case 36:
                printk("Single Step Trap.\n");
                break;

        case 45:
                printk("IA-32 Exception.\n");
                break;

        case 46:
                printk("IA-32 Intercept.\n");
                break;

        case 47:
                printk("IA-32 Interrupt.\n");
                break;

        default:
                printk("Fault %lu\n", vector);
                break;
        }

        show_registers(regs);
        panic("Fault in Xen.\n");
}

unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S */
int first_break = 0;
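
/*
 * break-instruction handler.  For guest-kernel (cpl 2) breaks this
 * dispatches SSC requests (iim 0x80001/0x80002, apparently simulator
 * system calls), hypercalls (iim equal to the domain's breakimm) and
 * hyperprivops, plus crash-debugger breaks taken in Xen context.
 * Anything else is reflected to the guest as a break fault with iim
 * preserved.
 */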
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        IA64FAULT vector;

        /* FIXME: don't hardcode constant */
        if ((iim == 0x80001 || iim == 0x80002)
            && ia64_get_cpl(regs->cr_ipsr) == 2) {
                do_ssc(vcpu_get_gr(current, 36), regs);
        }
#ifdef CRASH_DEBUG
        else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
                if (iim == 0)
                        show_registers(regs);
                debugger_trap_fatal(0 /* don't care */, regs);
        }
#endif
        else if (iim == d->arch.breakimm && ia64_get_cpl(regs->cr_ipsr) == 2) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;

                if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
                        if (!PSCBX(v, hypercall_continuation))
                                vcpu_increment_iip(current);
                } else
                        reflect_interruption(isr, regs, vector);
        } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
                   && ia64_get_cpl(regs->cr_ipsr) == 2) {
                if (ia64_hyperprivop(iim, regs))
                        vcpu_increment_iip(current);
        } else {
                if (iim == 0)
                        die_if_kernel("bug check", regs, iim);
                PSCB(v, iim) = iim;
                reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
        }
}
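
/*
 * Privileged-operation fault: attempt to emulate the privileged
 * instruction; if emulation fails, reflect the returned fault to the
 * guest.  This is the path the changeset above touches: a general
 * exception returned by priv_emulate() may carry an ISR.code value in
 * its low byte.
 */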
void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
        IA64FAULT vector;

        vector = priv_emulate(current, regs, isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector to reflect that requires
                // iha/itir (e.g. vcpu_force_data_miss), they must be set there
                /*
                 * IA64_GENEX_VECTOR may carry an ISR.code value in its
                 * lowest byte; see IA64_ILLOP_FAULT, ...
                 */
                if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
                        isr = vector & 0xffUL;
                        vector = IA64_GENEX_VECTOR;
                }
                reflect_interruption(isr, regs, vector);
        }
}
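
/*
 * Generic reflection: map the hardware fault/trap vector number onto the
 * guest IVT offset to deliver.  Disabled-FP faults, NaT consumption and
 * FP faults/traps get special handling and may be resolved entirely
 * inside Xen without reflecting anything.
 */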
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
        struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;
        unsigned long status;

        /* Following faults shouldn't be seen from Xen itself */
        BUG_ON(!(psr & IA64_PSR_CPL));

        switch (vector) {
        case 8:
                vector = IA64_DIRTY_BIT_VECTOR;
                break;
        case 9:
                vector = IA64_INST_ACCESS_BIT_VECTOR;
                break;
        case 10:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_BIT_VECTOR;
                break;
        case 20:
                check_lazy_cover = 1;
                vector = IA64_PAGE_NOT_PRESENT_VECTOR;
                break;
        case 22:
                vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
                break;
        case 23:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
                break;
        case 24:
                vector = IA64_GENEX_VECTOR;
                break;
        case 25:
                if (PSCB(v, hpsr_dfh)) {
                        PSCB(v, hpsr_dfh) = 0;
                        PSCB(v, hpsr_mfh) = 1;
                        if (__ia64_per_cpu_var(fp_owner) != v)
                                __ia64_load_fpu(v->arch._thread.fph);
                }
                if (!PSCB(v, vpsr_dfh)) {
                        regs->cr_ipsr &= ~IA64_PSR_DFH;
                        return;
                }
                vector = IA64_DISABLED_FPREG_VECTOR;
                break;
        case 26:
                if (((isr >> 4L) & 0xfL) == 1) {
                        /* Fault is due to a register NaT consumption fault. */
                        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
                        printk("ia64_handle_reflection: handling regNaT "
                               "fault\n");
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#if 1
                // pass null pointer dereferences through with no error
                // but retain debug output for non-zero ifa
                if (!ifa) {
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#endif
#ifdef CONFIG_PRIVIFY
                /* Some privified operations are coded using reg+64 instead
                   of reg. */
                printk("*** NaT fault... attempting to handle as privop\n");
                printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v, regs, isr);
                if (vector == IA64_NO_FAULT) {
                        printk("*** Handled privop masquerading as NaT "
                               "fault\n");
                        return;
                }
#endif
                vector = IA64_NAT_CONSUMPTION_VECTOR;
                break;
        case 27:
                //printk("*** Handled speculation vector, itc=%lx!\n",
                //       ia64_get_itc());
                PSCB(current, iim) = iim;
                vector = IA64_SPECULATION_VECTOR;
                break;
        case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR;
                break;
        case 32:
                status = handle_fpu_swa(1, regs, isr);
                if (!status) {
                        vcpu_increment_iip(v);
                        return;
                }
                // fetching the code bundle failed
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP fault\n");
                vector = IA64_FP_FAULT_VECTOR;
                break;
        case 33:
                status = handle_fpu_swa(0, regs, isr);
                if (!status)
                        return;
                // fetching the code bundle failed
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP trap\n");
                vector = IA64_FP_TRAP_VECTOR;
                break;
        case 34:
                if (isr & (1UL << 4))
                        printk("ia64_handle_reflection: handling "
                               "unimplemented instruction address %s\n",
                               (isr & (1UL << 32)) ? "fault" : "trap");
                vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
                break;
        case 35:
                printk("ia64_handle_reflection: handling taken branch trap\n");
                vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
                break;
        case 36:
                printk("ia64_handle_reflection: handling single step trap\n");
                vector = IA64_SINGLE_STEP_TRAP_VECTOR;
                break;

        default:
                printk("ia64_handle_reflection: unhandled vector=0x%lx\n",
                       vector);
                while (vector)
                        /* spin */;
                return;
        }
        if (check_lazy_cover && (isr & IA64_ISR_IR) &&
            handle_lazy_cover(v, regs))
                return;
        PSCB(current, ifa) = ifa;
        PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
        reflect_interruption(isr, regs, vector);
}
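
/*
 * Shadow (dirty-bit) fault: mark the faulting page dirty in the domain's
 * shadow bitmap, then decide from the virtual Dirty bit whether the fault
 * must also be reflected to the guest.
 */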
void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
        struct vcpu *v = current;
        struct domain *d = current->domain;
        unsigned long gpfn;
        unsigned long pte = 0;
        struct vhpt_lf_entry *vlfe;

        /* There are 2 jobs to do:
           - marking the page as dirty (the metaphysical address must be
             extracted to do that).
           - reflecting or not the fault (the virtual Dirty bit must be
             extracted to decide).
           Unfortunately this information is not immediately available!  */

        /* Extract the metaphysical address.
           Try to get it from VHPT and M2P as we need the flags.  */
        vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
        pte = vlfe->page_flags;
        if (vlfe->ti_tag == ia64_ttag(ifa)) {
                /* The VHPT entry is valid.  */
                gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
                BUG_ON(gpfn == INVALID_M2P_ENTRY);
        } else {
                unsigned long itir, iha;
                IA64FAULT fault;

                /* The VHPT entry is not valid.  */
                vlfe = NULL;

                /* FIXME: gives a chance to tpa, as the TC was valid.  */

                fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

                /* Try again!  */
                if (fault != IA64_NO_FAULT) {
                        /* This will trigger a dtlb miss.  */
                        ia64_ptcl(ifa, PAGE_SHIFT << 2);
                        return;
                }
                gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
                if (pte & _PAGE_D)
                        pte |= _PAGE_VIRT_D;
        }

        /* Set the dirty bit in the bitmap.  */
        shadow_mark_page_dirty(d, gpfn);

        /* Update the local TC/VHPT and decide whether or not the fault
           should be reflected.
           SMP note: we almost ignore the other processors.  The
           shadow_bitmap has been atomically updated.  If the dirty fault
           happens on another processor, it will do its job.  */

        if (pte != 0) {
                /* We know how to handle the fault.  */

                if (pte & _PAGE_VIRT_D) {
                        /* Rewrite the VHPT entry.
                           There is no race here because only the
                           cpu VHPT owner can write page_flags.  */
                        if (vlfe)
                                vlfe->page_flags = pte | _PAGE_D;

                        /* Purge the TC locally.
                           It will be reloaded from the VHPT iff the
                           VHPT entry is still valid.  */
                        ia64_ptcl(ifa, PAGE_SHIFT << 2);

                        atomic64_inc(&d->arch.shadow_fault_count);
                } else {
                        /* Reflect.
                           In this case there is no need to purge.  */
                        ia64_handle_reflection(ifa, regs, isr, 0, 8);
                }
        } else {
                /* We don't know whether or not the fault must be
                   reflected, as the VHPT entry is not valid.  */
                /* FIXME: in metaphysical mode, we could do an ITC now.  */
                ia64_ptcl(ifa, PAGE_SHIFT << 2);
        }
}