ia64/xen-unstable

xen/arch/ia64/xen/faults.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author: Alex Williamson <alex.williamson@hp.com>
Date:   Thu Jan 17 12:05:43 2008 -0700
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
// should never panic domain... if it does, stack may have been overrun
static void check_bad_nested_interruption(unsigned long isr,
                                          struct pt_regs *regs,
                                          unsigned long vector)
{
        struct vcpu *v = current;

        if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
                panic_domain(regs,
                             "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
                panic_domain(regs, "psr.ic off, delivering fault=%lx,"
                             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
                             isr, PSCB(v, iip));
        }
}
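
/*
 * Note (summary of the function below): reflecting an interruption means
 * saving the interruption state (isr, iip, ifs, ipsr) into the vcpu's
 * shadow privileged registers (PSCB), switching to bank 0, and redirecting
 * execution to the guest's IVA plus the vector offset, with interruption
 * collection and event delivery masked until the guest issues rfi.
 */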
static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                                 unsigned long vector)
{
        struct vcpu *v = current;

        if (!PSCB(v, interrupt_collection_enabled))
                check_bad_nested_interruption(isr, regs, vector);
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;
        else
                regs->cr_ipsr &= ~IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        perfc_incra(slow_reflect, vector >> 8);

        debugger_event(vector == IA64_EXTINT_VECTOR ?
                       XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
}
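
/*
 * Note: reflect_event() delivers a pending event-channel upcall to the
 * guest's registered event_callback_ip, using the same state-save sequence
 * as reflect_interruption() but without a hardware vector.
 */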
void reflect_event(void)
{
        struct vcpu *v = current;
        struct pt_regs *regs;
        unsigned long isr;

        if (!event_pending(v))
                return;

        /* Sanity check */
        if (is_idle_vcpu(v)) {
                //printk("WARN: invocation to reflect_event in nested xen\n");
                return;
        }

        regs = vcpu_regs(v);

        isr = regs->cr_ipsr & IA64_PSR_RI;

        if (!PSCB(v, interrupt_collection_enabled))
                printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
                       "isr=%lx,viip=0x%lx\n",
                       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;
        else
                regs->cr_ipsr &= ~IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        debugger_event(XEN_IA64_DEBUG_ON_EVENT);
}
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
        if (!PSCB(v, interrupt_collection_enabled)) {
                PSCB(v, ifs) = regs->cr_ifs;
                regs->cr_ifs = 0;
                perfc_incr(lazy_cover);
                return 1;       // retry same instruction with cr.ifs off
        }
        return 0;
}
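
/*
 * Guest TLB-miss handler: translate the faulting address through the
 * guest's TLB/VHPT, insert the corresponding machine translation, and
 * otherwise reflect a TLB-miss fault to the guest (or a nested data TLB
 * fault when interruption collection is off).  Faults taken inside Xen
 * itself are resolved via the exception table.
 */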
void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
        unsigned long iip = regs->cr_iip, iha;
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
        int is_ptc_l_needed = 0;
        ia64_itir_t _itir = {.itir = itir};

        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA)
                && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to a speculative load or lfetch.fault,
                 * set the "ed" bit in the psr to ensure forward progress.
                 * (Target register will get a NaT for ld.s, lfetch will be
                 * canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

 again:
        fault = vcpu_translate(current, address, is_data, &pteval,
                               &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                struct p2m_entry entry;
                unsigned long m_pteval;
                m_pteval = translate_domain_pte(pteval, address, itir,
                                                &(_itir.itir), &entry);
                vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
                                 m_pteval, pteval, _itir.itir, &entry);
                if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
                    p2m_entry_retry(&entry)) {
                        /* dtlb has been purged in-between.  This dtlb was
                           matching.  Undo the work.  */
                        vcpu_flush_tlb_vhpt_range(address, _itir.ps);

                        // the stale entry which we inserted above
                        // may remain in the tlb cache.
                        // we don't purge it now, hoping the next itc purges it.
                        is_ptc_l_needed = 1;
                        goto again;
                }
                return;
        }

        if (is_ptc_l_needed)
                vcpu_ptc_l(current, address, _itir.ps);
        if (!guest_mode(regs)) {
                /* The fault occurs inside Xen.  */
                if (!ia64_done_with_exception(regs)) {
                        // should never happen.  If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
                               " lookup failed, iip=0x%lx, addr=0x%lx, "
                               "spinning...\n", iip, address);
                        panic_domain(regs, "*** xen_handle_domain_access: "
                                     "exception table lookup failed, "
                                     "iip=0x%lx, addr=0x%lx, spinning...\n",
                                     iip, address);
                }
                return;
        }

        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
                return;

        if (!PSCB(current, interrupt_collection_enabled)) {
                check_bad_nested_interruption(isr, regs, fault);
                //printk("Delivering NESTED DATA TLB fault\n");
                fault = IA64_DATA_NESTED_TLB_VECTOR;
                regs->cr_iip =
                    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
                regs->cr_ipsr =
                    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
                regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
                                               IA64_PSR_CPL0_BIT);
                if (PSCB(current, dcr) & IA64_DCR_BE)
                        regs->cr_ipsr |= IA64_PSR_BE;
                else
                        regs->cr_ipsr &= ~IA64_PSR_BE;

                if (PSCB(current, hpsr_dfh))
                        regs->cr_ipsr |= IA64_PSR_DFH;
                PSCB(current, vpsr_dfh) = 0;
                perfc_incra(slow_reflect, fault >> 8);
                return;
        }

        PSCB(current, itir) = itir;
        PSCB(current, iha) = iha;
        PSCB(current, ifa) = address;
        reflect_interruption(isr, regs, fault);
}
fpswa_interface_t *fpswa_interface = 0;

void __init trap_init(void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a virtual address */
                fpswa_interface = __va(ia64_boot_param->fpswa);
        else
                printk("No FPSWA supported.\n");
}
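
/*
 * Note on fp_emulate() below: it wraps the firmware FPSWA entry point
 * discovered in trap_init().  Only f6-f11 are live in pt_regs, so the
 * fp_state mask covers just those registers before the faulting bundle is
 * handed to the firmware emulator.
 */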
static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return (fpswa_ret_t) {-1, 0, 0, 0};

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long trap_type,
         *      void *Bundle,
         *      unsigned long *pipsr,
         *      unsigned long *pfsr,
         *      unsigned long *pisr,
         *      unsigned long *ppreds,
         *      unsigned long *pifs,
         *      void *fp_state);
         */
        ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
                                         ipsr, fpsr, isr, pr, ifs, &fp_state);

        return ret;
}
/*
 * Handle floating-point assist faults and traps for the domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        struct vcpu *v = current;
        IA64_BUNDLE bundle;
        unsigned long fault_ip;
        fpswa_ret_t ret;

        fault_ip = regs->cr_iip;
        /*
         * When an FP trap occurs, the trapping instruction has already
         * completed.  If ipsr.ri == 0, the trapping instruction is in the
         * previous bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;

        if (VMX_DOMAIN(current)) {
                if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
                        return IA64_RETRY;
        } else
                bundle = __get_domain_bundle(fault_ip);

        if (!bundle.i64[0] && !bundle.i64[1]) {
                printk("%s: floating-point bundle at 0x%lx not mapped\n",
                       __FUNCTION__, fault_ip);
                return -1;
        }

        ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                         &isr, &regs->pr, &regs->cr_ifs, regs);

        if (ret.status) {
                PSCBX(v, fpswa_ret) = ret;
                printk("%s(%s): fp_emulate() returned %ld\n",
                       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
        }

        return ret.status;
}
void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
        struct pt_regs *regs = (struct pt_regs *)&stack;
        unsigned long code;
        static const char *const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6",
                "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10",
                "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
               "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
               regs->cr_iip, regs->cr_ipsr, isr);

        if ((isr & IA64_ISR_NA) &&
            ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the
                 * psr to cancel the lfetch.
                 */
                ia64_psr(regs)->ed = 1;
                printk("ia64_fault: handled lfetch.fault\n");
                return;
        }

        switch (vector) {
        case 0:
                printk("VHPT Translation.\n");
                break;

        case 4:
                printk("Alt DTLB.\n");
                break;

        case 6:
                printk("Instruction Key Miss.\n");
                break;

        case 7:
                printk("Data Key Miss.\n");
                break;

        case 8:
                printk("Dirty-bit.\n");
                break;

        case 20:
                printk("Page Not Found.\n");
                break;

        case 21:
                printk("Key Permission.\n");
                break;

        case 22:
                printk("Instruction Access Rights.\n");
                break;

        case 24:        /* General Exception */
                code = (isr >> 4) & 0xf;
                printk("General Exception: %s%s.\n", reason[code],
                       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                      " (data access)") : "");
                if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx "
                               "(pr = %016lx)\n", current->comm, current->pid,
                               regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
                        printk("ia64_fault: returning on hazard\n");
                        return;
                }
                break;

        case 25:
                printk("Disabled FP-Register.\n");
                break;

        case 26:
                printk("NaT consumption.\n");
                break;

        case 29:
                printk("Debug.\n");
                break;

        case 30:
                printk("Unaligned Reference.\n");
                break;

        case 31:
                printk("Unsupported data reference.\n");
                break;

        case 32:
                printk("Floating-Point Fault.\n");
                break;

        case 33:
                printk("Floating-Point Trap.\n");
                break;

        case 34:
                printk("Lower Privilege Transfer Trap.\n");
                break;

        case 35:
                printk("Taken Branch Trap.\n");
                break;

        case 36:
                printk("Single Step Trap.\n");
                break;

        case 45:
                printk("IA-32 Exception.\n");
                break;

        case 46:
                printk("IA-32 Intercept.\n");
                break;

        case 47:
                printk("IA-32 Interrupt.\n");
                break;

        default:
                printk("Fault %lu\n", vector);
                break;
        }

        show_registers(regs);
        panic("Fault in Xen.\n");
}
/* Also read in hyperprivop.S */
int first_break = 0;
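
/*
 * Break-instruction handler.  The break immediate selects the service:
 * the SSC immediates 0x80001/0x80002, the domain's hypercall break
 * immediate (d->arch.breakimm), or a fast hyperprivop; anything else is
 * reflected to the guest as an ordinary break fault.
 */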
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        IA64FAULT vector;

        /* FIXME: don't hardcode constant */
        if ((iim == 0x80001 || iim == 0x80002)
            && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                do_ssc(vcpu_get_gr(current, 36), regs);
        }
#ifdef CRASH_DEBUG
        else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
                if (iim == 0)
                        show_registers(regs);
                debugger_trap_fatal(0 /* don't care */ , regs);
        }
#endif
        else if (iim == d->arch.breakimm &&
                 ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;

                if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
                        if (!PSCBX(v, hypercall_continuation))
                                vcpu_increment_iip(current);
                } else
                        reflect_interruption(isr, regs, vector);
        } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
                   && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                if (ia64_hyperprivop(iim, regs))
                        vcpu_increment_iip(current);
        } else {
                if (iim == 0)
                        die_if_kernel("bug check", regs, iim);
                PSCB(v, iim) = iim;
                reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
        }
}
void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
        IA64FAULT vector;

        vector = priv_emulate(current, regs, isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector to reflect that requires
                // iha/itir (e.g. vcpu_force_data_miss), they must be set there
                /*
                 * IA64_GENEX_VECTOR may carry an ISR.code in its lowest byte;
                 * see IA64_ILLOP_FAULT, ...
                 */
                if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
                        isr = vector & 0xffUL;
                        vector = IA64_GENEX_VECTOR;
                }
                reflect_interruption(isr, regs, vector);
        }
}
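
/*
 * Map a hardware fault/trap number onto the corresponding guest IVT vector
 * and reflect it, handling a few cases locally first: disabled-FP-register
 * ownership, FP software assist, kernel debug events and lazy cover.
 */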
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
        struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;
        unsigned long status;

        /* The following faults shouldn't be seen from Xen itself */
        BUG_ON(!(psr & IA64_PSR_CPL));

        switch (vector) {
        case 6:
                vector = IA64_INST_KEY_MISS_VECTOR;
                break;
        case 7:
                vector = IA64_DATA_KEY_MISS_VECTOR;
                break;
        case 8:
                vector = IA64_DIRTY_BIT_VECTOR;
                break;
        case 9:
                vector = IA64_INST_ACCESS_BIT_VECTOR;
                break;
        case 10:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_BIT_VECTOR;
                break;
        case 20:
                check_lazy_cover = 1;
                vector = IA64_PAGE_NOT_PRESENT_VECTOR;
                break;
        case 21:
                vector = IA64_KEY_PERMISSION_VECTOR;
                break;
        case 22:
                vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
                break;
        case 23:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
                break;
        case 24:
                vector = IA64_GENEX_VECTOR;
                break;
        case 25:
                if (PSCB(v, hpsr_dfh)) {
                        PSCB(v, hpsr_dfh) = 0;
                        PSCB(v, hpsr_mfh) = 1;
                        if (__ia64_per_cpu_var(fp_owner) != v)
                                __ia64_load_fpu(v->arch._thread.fph);
                }
                if (!PSCB(v, vpsr_dfh)) {
                        regs->cr_ipsr &= ~IA64_PSR_DFH;
                        return;
                }
                vector = IA64_DISABLED_FPREG_VECTOR;
                break;
        case 26:
                if (((isr >> 4L) & 0xfL) == 1) {
                        /* Fault is due to a register NaT consumption fault. */
                        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
                        printk("ia64_handle_reflection: handling regNaT "
                               "fault\n");
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#if 1
                // pass null pointer dereferences through with no error
                // but retain debug output for non-zero ifa
                if (!ifa) {
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#endif
#ifdef CONFIG_PRIVIFY
                /* Some privified operations are coded using reg+64 instead
                   of reg.  */
                printk("*** NaT fault... attempting to handle as privop\n");
                printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v, regs, isr);
                if (vector == IA64_NO_FAULT) {
                        printk("*** Handled privop masquerading as NaT "
                               "fault\n");
                        return;
                }
#endif
                vector = IA64_NAT_CONSUMPTION_VECTOR;
                break;
        case 27:
                //printk("*** Handled speculation vector, itc=%lx!\n",
                //       ia64_get_itc());
                PSCB(current, iim) = iim;
                vector = IA64_SPECULATION_VECTOR;
                break;
        case 29:
                vector = IA64_DEBUG_VECTOR;
                if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG))
                        return;
                break;
        case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR;
                break;
        case 32:
                status = handle_fpu_swa(1, regs, isr);
                if (!status) {
                        vcpu_increment_iip(v);
                        return;
                }
                // fetching the code failed
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP fault\n");
                vector = IA64_FP_FAULT_VECTOR;
                break;
        case 33:
                status = handle_fpu_swa(0, regs, isr);
                if (!status)
                        return;
                // fetching the code failed
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP trap\n");
                vector = IA64_FP_TRAP_VECTOR;
                break;
        case 34:
                if (isr & (1UL << 4))
                        printk("ia64_handle_reflection: handling "
                               "unimplemented instruction address %s\n",
                               (isr & (1UL << 32)) ? "fault" : "trap");
                vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
                break;
        case 35:
                vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
                if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_TBRANCH))
                        return;
                break;
        case 36:
                vector = IA64_SINGLE_STEP_TRAP_VECTOR;
                if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP))
                        return;
                break;

        default:
                panic_domain(regs, "ia64_handle_reflection: "
                             "unhandled vector=0x%lx\n", vector);
                return;
        }
        if (check_lazy_cover && (isr & IA64_ISR_IR) &&
            handle_lazy_cover(v, regs))
                return;
        PSCB(current, ifa) = ifa;
        PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
        reflect_interruption(isr, regs, vector);
}
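
/*
 * Shadow (dirty-bit) fault handler used for dirty page logging: mark the
 * faulting gpfn in the domain's shadow bitmap, then either fix up the
 * VHPT/TC locally or reflect a dirty-bit fault (vector 8) to the guest.
 */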
void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
        struct vcpu *v = current;
        struct domain *d = current->domain;
        unsigned long gpfn;
        unsigned long pte = 0;
        struct vhpt_lf_entry *vlfe;

        /*
         * v->arch.vhpt_pg_shift shouldn't be used here.
         * Currently the dirty page logging bitmap is allocated based
         * on PAGE_SIZE.  This is part of the xen_domctl_shadow_op ABI.
         * If we want to log dirty pages at a finer granularity when
         * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to
         * revise the ABI and update this function and the related
         * tool stack (live relocation).
         */
        unsigned long vhpt_pg_shift = PAGE_SHIFT;

        /* There are 2 jobs to do:
           - marking the page as dirty (the metaphysical address must be
             extracted to do that).
           - reflecting or not the fault (the virtual Dirty bit must be
             extracted to decide).
           Unfortunately this information is not immediately available!
         */

        /* Extract the metaphysical address.
           Try to get it from the VHPT and the M2P as we need the flags.  */
        vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
        pte = vlfe->page_flags;
        if (vlfe->ti_tag == ia64_ttag(ifa)) {
                /* The VHPT entry is valid.  */
                gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
                                         vhpt_pg_shift);
                BUG_ON(gpfn == INVALID_M2P_ENTRY);
        } else {
                unsigned long itir, iha;
                IA64FAULT fault;

                /* The VHPT entry is not valid.  */
                vlfe = NULL;

                /* FIXME: gives a chance to tpa, as the TC was valid.  */

                fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

                /* Try again! */
                if (fault != IA64_NO_FAULT) {
                        /* This will trigger a dtlb miss.  */
                        ia64_ptcl(ifa, vhpt_pg_shift << 2);
                        return;
                }
                gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
                if (pte & _PAGE_D)
                        pte |= _PAGE_VIRT_D;
        }

        /* Set the dirty bit in the bitmap.  */
        shadow_mark_page_dirty(d, gpfn);

        /* Update the local TC/VHPT and decide whether or not the fault
           should be reflected.
           SMP note: we almost ignore the other processors.  The shadow_bitmap
           has been atomically updated.  If the dirty fault happens on another
           processor, it will do its job.
         */

        if (pte != 0) {
                /* We will know how to handle the fault.  */

                if (pte & _PAGE_VIRT_D) {
                        /* Rewrite the VHPT entry.
                           There is no race here because only the
                           cpu VHPT owner can write page_flags.  */
                        if (vlfe)
                                vlfe->page_flags = pte | _PAGE_D;

                        /* Purge the TC locally.
                           It will be reloaded from the VHPT iff the
                           VHPT entry is still valid.  */
                        ia64_ptcl(ifa, vhpt_pg_shift << 2);

                        atomic64_inc(&d->arch.shadow_fault_count);
                } else {
                        /* Reflect.
                           In this case there is no need to purge.  */
                        ia64_handle_reflection(ifa, regs, isr, 0, 8);
                }
        } else {
                /* We don't know whether or not the fault must be
                   reflected.  The VHPT entry is not valid.  */
                /* FIXME: in metaphysical mode, we could do an ITC now.  */
                ia64_ptcl(ifa, vhpt_pg_shift << 2);
        }
}
823 }