xen/arch/ia64/xen/faults.c @ 15766:3cd445aecf59

[IA64] Fixes for 4k page support.

Some code is dependent on PAGE_SIZE and shouldn't be changed.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author: Alex Williamson <alex.williamson@hp.com>
Date:   Tue Aug 28 12:30:31 2007 -0600
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations be? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I |	\
			 IA64_PSR_DT | IA64_PSR_RT |	\
			 IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH |	\
			 IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
			 IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB |	\
			 IA64_PSR_CPL| IA64_PSR_MC | IA64_PSR_IS |	\
			 IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |	\
			 IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
// should never panic domain... if it does, stack may have been overrun
static void check_bad_nested_interruption(unsigned long isr,
                                          struct pt_regs *regs,
                                          unsigned long vector)
{
	struct vcpu *v = current;

	if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
		panic_domain(regs,
		             "psr.dt off, trying to deliver nested dtlb!\n");
	}
	vector &= ~0xf;
	if (vector != IA64_DATA_TLB_VECTOR &&
	    vector != IA64_ALT_DATA_TLB_VECTOR &&
	    vector != IA64_VHPT_TRANS_VECTOR) {
		panic_domain(regs, "psr.ic off, delivering fault=%lx,"
		             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
		             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
		             isr, PSCB(v, iip));
	}
}
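
/*
 * Deliver an interruption to the guest: save the interruption state
 * into the PSCB, switch to register bank 0, and redirect iip to the
 * guest's handler for the given vector, with psr adjusted for guest
 * execution.
 */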
static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                                 unsigned long vector)
{
	struct vcpu *v = current;

	if (!PSCB(v, interrupt_collection_enabled))
		check_bad_nested_interruption(isr, regs, vector);
	PSCB(v, unat) = regs->ar_unat;	// not sure if this is really needed?
	PSCB(v, precover_ifs) = regs->cr_ifs;
	PSCB(v, ipsr) = vcpu_get_psr(v);
	vcpu_bsw0(v);
	PSCB(v, isr) = isr;
	PSCB(v, iip) = regs->cr_iip;
	PSCB(v, ifs) = 0;

	regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
	regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
	if (PSCB(v, dcr) & IA64_DCR_BE)
		regs->cr_ipsr |= IA64_PSR_BE;

	if (PSCB(v, hpsr_dfh))
		regs->cr_ipsr |= IA64_PSR_DFH;
	PSCB(v, vpsr_dfh) = 0;
	v->vcpu_info->evtchn_upcall_mask = 1;
	PSCB(v, interrupt_collection_enabled) = 0;

	perfc_incra(slow_reflect, vector >> 8);
}
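
/*
 * Deliver a pending event channel upcall to the guest through its
 * registered event callback, using the same delivery sequence as
 * reflect_interruption().
 */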
void reflect_event(void)
{
	struct vcpu *v = current;
	struct pt_regs *regs;
	unsigned long isr;

	if (!event_pending(v))
		return;

	/* Sanity check */
	if (is_idle_vcpu(v)) {
		//printk("WARN: invocation to reflect_event in nested xen\n");
		return;
	}

	regs = vcpu_regs(v);

	isr = regs->cr_ipsr & IA64_PSR_RI;

	if (!PSCB(v, interrupt_collection_enabled))
		printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
		       "isr=%lx,viip=0x%lx\n",
		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
	PSCB(v, unat) = regs->ar_unat;	// not sure if this is really needed?
	PSCB(v, precover_ifs) = regs->cr_ifs;
	PSCB(v, ipsr) = vcpu_get_psr(v);
	vcpu_bsw0(v);
	PSCB(v, isr) = isr;
	PSCB(v, iip) = regs->cr_iip;
	PSCB(v, ifs) = 0;

	regs->cr_iip = v->arch.event_callback_ip;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
	regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
	if (PSCB(v, dcr) & IA64_DCR_BE)
		regs->cr_ipsr |= IA64_PSR_BE;

	if (PSCB(v, hpsr_dfh))
		regs->cr_ipsr |= IA64_PSR_DFH;
	PSCB(v, vpsr_dfh) = 0;
	v->vcpu_info->evtchn_upcall_mask = 1;
	PSCB(v, interrupt_collection_enabled) = 0;
}
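
/*
 * With interruption collection disabled, the guest has not yet executed
 * "cover": stash cr.ifs in the PSCB and clear it so the faulting
 * instruction can be retried ("lazy cover").
 */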
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
	if (!PSCB(v, interrupt_collection_enabled)) {
		PSCB(v, ifs) = regs->cr_ifs;
		regs->cr_ifs = 0;
		perfc_incr(lazy_cover);
		return 1;	// retry same instruction with cr.ifs off
	}
	return 0;
}
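
/*
 * Guest TLB miss/page fault path: translate the address through the
 * guest TLB and p2m and install the mapping, recover from faults taken
 * inside Xen via the exception table, or reflect the fault to the guest.
 */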
void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
	unsigned long iip = regs->cr_iip, iha;
	// FIXME should validate address here
	unsigned long pteval;
	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
	IA64FAULT fault;
	int is_ptc_l_needed = 0;
	ia64_itir_t _itir = {.itir = itir};

	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA)
	        && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to a speculative load or lfetch.fault,
		 * set the "ed" bit in the psr to ensure forward progress.
		 * (Target register will get a NaT for ld.s, lfetch will be
		 * canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
 again:
	fault = vcpu_translate(current, address, is_data, &pteval,
	                       &itir, &iha);
	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
		struct p2m_entry entry;
		unsigned long m_pteval;
		m_pteval = translate_domain_pte(pteval, address, itir,
		                                &(_itir.itir), &entry);
		vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
		                 m_pteval, pteval, _itir.itir, &entry);
		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
		    p2m_entry_retry(&entry)) {
			/* dtlb has been purged in-between.  This dtlb was
			   matching.  Undo the work.  */
			vcpu_flush_tlb_vhpt_range(address, _itir.ps);

			// The stale entry which we inserted above
			// may remain in the TLB cache.
			// We don't purge it now, hoping the next itc
			// purges it.
			is_ptc_l_needed = 1;
			goto again;
		}
		return;
	}
	if (is_ptc_l_needed)
		vcpu_ptc_l(current, address, _itir.ps);
	if (!guest_mode(regs)) {
		/* The fault occurs inside Xen.  */
		if (!ia64_done_with_exception(regs)) {
			// should never happen.  If it does, region 0 addr may
			// indicate a bad xen pointer
			printk("*** xen_handle_domain_access: exception table"
			       " lookup failed, iip=0x%lx, addr=0x%lx, "
			       "spinning...\n", iip, address);
			panic_domain(regs, "*** xen_handle_domain_access: "
			             "exception table lookup failed, "
			             "iip=0x%lx, addr=0x%lx, spinning...\n",
			             iip, address);
		}
		return;
	}

	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
		return;

	if (!PSCB(current, interrupt_collection_enabled)) {
		check_bad_nested_interruption(isr, regs, fault);
		//printk("Delivering NESTED DATA TLB fault\n");
		fault = IA64_DATA_NESTED_TLB_VECTOR;
		regs->cr_iip =
		    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
		regs->cr_ipsr =
		    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
		regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
		                               IA64_PSR_CPL0_BIT);
		if (PSCB(current, dcr) & IA64_DCR_BE)
			regs->cr_ipsr |= IA64_PSR_BE;

		if (PSCB(current, hpsr_dfh))
			regs->cr_ipsr |= IA64_PSR_DFH;
		PSCB(current, vpsr_dfh) = 0;
		perfc_incra(slow_reflect, fault >> 8);
		return;
	}

	PSCB(current, itir) = itir;
	PSCB(current, iha) = iha;
	PSCB(current, ifa) = address;
	reflect_interruption(isr, regs, fault);
}
fpswa_interface_t *fpswa_interface = 0;

void __init trap_init(void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a virtual address */
		fpswa_interface = __va(ia64_boot_param->fpswa);
	else
		printk("No FPSWA supported.\n");
}
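
/*
 * Marshal the FP registers used by the kernel (f6-f11) into an
 * fp_state_t and invoke the EFI FPSWA handler to emulate the faulting
 * floating-point operation.
 */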
static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return (fpswa_ret_t) {-1, 0, 0, 0};

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * compute fp_state.  only FP registers f6 - f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;	/* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *)&regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *	unsigned long trap_type,
	 *	void *Bundle,
	 *	unsigned long *pipsr,
	 *	unsigned long *pfsr,
	 *	unsigned long *pisr,
	 *	unsigned long *ppreds,
	 *	unsigned long *pifs,
	 *	void *fp_state);
	 */
	ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
	                                 ipsr, fpsr, isr, pr, ifs, &fp_state);

	return ret;
}
/*
 * Handle floating-point assist faults and traps for the domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	struct vcpu *v = current;
	IA64_BUNDLE bundle;
	unsigned long fault_ip;
	fpswa_ret_t ret;

	fault_ip = regs->cr_iip;
	/*
	 * When an FP trap occurs, the trapping instruction has already
	 * completed.  If ipsr.ri == 0, the trapping instruction is in
	 * the previous bundle.
	 */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;
	if (VMX_DOMAIN(current)) {
		if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
			return IA64_RETRY;
	} else
		bundle = __get_domain_bundle(fault_ip);

	if (!bundle.i64[0] && !bundle.i64[1]) {
		printk("%s: floating-point bundle at 0x%lx not mapped\n",
		       __FUNCTION__, fault_ip);
		return -1;
	}

	ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
	                 &isr, &regs->pr, &regs->cr_ifs, regs);

	if (ret.status) {
		PSCBX(v, fpswa_ret) = ret;
		printk("%s(%s): fp_emulate() returned %ld\n",
		       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
	}

	return ret.status;
}
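
/*
 * Fault taken by Xen itself: print diagnostics for the vector.  An
 * lfetch.fault or a hazard is recoverable; anything else dumps the
 * registers and panics.
 */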
void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
	struct pt_regs *regs = (struct pt_regs *)&stack;
	unsigned long code;
	static const char *const reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6",
		"Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10",
		"Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};

	printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
	       "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
	       regs->cr_iip, regs->cr_ipsr, isr);

	if ((isr & IA64_ISR_NA) &&
	    ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault, set "ed" bit in the
		 * psr to cancel the lfetch.
		 */
		ia64_psr(regs)->ed = 1;
		printk("ia64_fault: handled lfetch.fault\n");
		return;
	}
	switch (vector) {
	case 0:
		printk("VHPT Translation.\n");
		break;

	case 4:
		printk("Alt DTLB.\n");
		break;

	case 6:
		printk("Instruction Key Miss.\n");
		break;

	case 7:
		printk("Data Key Miss.\n");
		break;

	case 8:
		printk("Dirty-bit.\n");
		break;

	case 20:
		printk("Page Not Found.\n");
		break;

	case 21:
		printk("Key Permission.\n");
		break;

	case 22:
		printk("Instruction Access Rights.\n");
		break;

	case 24:		/* General Exception */
		code = (isr >> 4) & 0xf;
		printk("General Exception: %s%s.\n", reason[code],
		       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
		                      " (data access)") : "");
		if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx "
			       "(pr = %016lx)\n", current->comm, current->pid,
			       regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
			printk("ia64_fault: returning on hazard\n");
			return;
		}
		break;

	case 25:
		printk("Disabled FP-Register.\n");
		break;

	case 26:
		printk("NaT consumption.\n");
		break;

	case 29:
		printk("Debug.\n");
		break;

	case 30:
		printk("Unaligned Reference.\n");
		break;

	case 31:
		printk("Unsupported data reference.\n");
		break;

	case 32:
		printk("Floating-Point Fault.\n");
		break;

	case 33:
		printk("Floating-Point Trap.\n");
		break;

	case 34:
		printk("Lower Privilege Transfer Trap.\n");
		break;

	case 35:
		printk("Taken Branch Trap.\n");
		break;

	case 36:
		printk("Single Step Trap.\n");
		break;

	case 45:
		printk("IA-32 Exception.\n");
		break;

	case 46:
		printk("IA-32 Intercept.\n");
		break;

	case 47:
		printk("IA-32 Interrupt.\n");
		break;

	default:
		printk("Fault %lu\n", vector);
		break;
	}

	show_registers(regs);
	panic("Fault in Xen.\n");
}
/* Also read in hyperprivop.S  */
int first_break = 0;
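
/*
 * break instruction: depending on the break immediate, dispatch to SSC
 * emulation, the debugger, the hypercall path, or a hyperprivop;
 * anything else is reflected to the guest as a break fault.
 */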
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	IA64FAULT vector;

	/* FIXME: don't hardcode constant */
	if ((iim == 0x80001 || iim == 0x80002)
	    && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
		do_ssc(vcpu_get_gr(current, 36), regs);
	}
#ifdef CRASH_DEBUG
	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
		if (iim == 0)
			show_registers(regs);
		debugger_trap_fatal(0 /* don't care */ , regs);
	}
#endif
	else if (iim == d->arch.breakimm &&
	         ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
		/* by default, do not continue */
		v->arch.hypercall_continuation = 0;

		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
			if (!PSCBX(v, hypercall_continuation))
				vcpu_increment_iip(current);
		} else
			reflect_interruption(isr, regs, vector);
	} else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
	           && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
		if (ia64_hyperprivop(iim, regs))
			vcpu_increment_iip(current);
	} else {
		if (iim == 0)
			die_if_kernel("bug check", regs, iim);
		PSCB(v, iim) = iim;
		reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
	}
}
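
/*
 * Privileged operation fault: try to emulate the privileged
 * instruction; if emulation itself raises a fault, reflect that fault
 * to the guest.
 */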
void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
	IA64FAULT vector;

	vector = priv_emulate(current, regs, isr);
	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
		// Note: if a path results in a vector to reflect that requires
		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
		/*
		 * IA64_GENEX_VECTOR may contain in the lowest byte an ISR.code
		 * see IA64_ILLOP_FAULT, ...
		 */
		if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
			isr = vector & 0xffUL;
			vector = IA64_GENEX_VECTOR;
		}
		reflect_interruption(isr, regs, vector);
	}
}
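
/*
 * Map a hardware fault vector to the corresponding guest interruption
 * vector and reflect it, first handling the cases Xen can resolve by
 * itself (disabled FP registers, FP software assist, lazy cover, ...).
 */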
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
	struct vcpu *v = current;
	unsigned long check_lazy_cover = 0;
	unsigned long psr = regs->cr_ipsr;
	unsigned long status;

	/* Following faults shouldn't be seen from Xen itself */
	BUG_ON(!(psr & IA64_PSR_CPL));

	switch (vector) {
	case 6:
		vector = IA64_INST_KEY_MISS_VECTOR;
		break;
	case 7:
		vector = IA64_DATA_KEY_MISS_VECTOR;
		break;
	case 8:
		vector = IA64_DIRTY_BIT_VECTOR;
		break;
	case 9:
		vector = IA64_INST_ACCESS_BIT_VECTOR;
		break;
	case 10:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_BIT_VECTOR;
		break;
	case 20:
		check_lazy_cover = 1;
		vector = IA64_PAGE_NOT_PRESENT_VECTOR;
		break;
	case 22:
		vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
		break;
	case 23:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
		break;
	case 24:
		vector = IA64_GENEX_VECTOR;
		break;
	case 25:
		if (PSCB(v, hpsr_dfh)) {
			PSCB(v, hpsr_dfh) = 0;
			PSCB(v, hpsr_mfh) = 1;
			if (__ia64_per_cpu_var(fp_owner) != v)
				__ia64_load_fpu(v->arch._thread.fph);
		}
		if (!PSCB(v, vpsr_dfh)) {
			regs->cr_ipsr &= ~IA64_PSR_DFH;
			return;
		}
		vector = IA64_DISABLED_FPREG_VECTOR;
		break;
	case 26:
		if (((isr >> 4L) & 0xfL) == 1) {
			/* Fault is due to a register NaT consumption fault. */
			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
			printk("ia64_handle_reflection: handling regNaT "
			       "fault\n");
			vector = IA64_NAT_CONSUMPTION_VECTOR;
			break;
		}
#if 1
		// pass null pointer dereferences through with no error
		// but retain debug output for non-zero ifa
		if (!ifa) {
			vector = IA64_NAT_CONSUMPTION_VECTOR;
			break;
		}
#endif
#ifdef CONFIG_PRIVIFY
		/* Some privified operations are coded using reg+64 instead
		   of reg.  */
		printk("*** NaT fault... attempting to handle as privop\n");
		printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
		       isr, ifa, regs->cr_iip, psr);
		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
		// certain NaT faults are higher priority than privop faults
		vector = priv_emulate(v, regs, isr);
		if (vector == IA64_NO_FAULT) {
			printk("*** Handled privop masquerading as NaT "
			       "fault\n");
			return;
		}
#endif
		vector = IA64_NAT_CONSUMPTION_VECTOR;
		break;
	case 27:
		//printk("*** Handled speculation vector, itc=%lx!\n",
		//       ia64_get_itc());
		PSCB(current, iim) = iim;
		vector = IA64_SPECULATION_VECTOR;
		break;
	case 29:
		vector = IA64_DEBUG_VECTOR;
		if (debugger_trap_entry(vector, regs))
			return;
		break;
	case 30:
		// FIXME: Should we handle unaligned refs in Xen??
		vector = IA64_UNALIGNED_REF_VECTOR;
		break;
	case 32:
		status = handle_fpu_swa(1, regs, isr);
		if (!status) {
			vcpu_increment_iip(v);
			return;
		}
		// failed to fetch the instruction bundle
		if (IA64_RETRY == status)
			return;
		printk("ia64_handle_reflection: handling FP fault\n");
		vector = IA64_FP_FAULT_VECTOR;
		break;
	case 33:
		status = handle_fpu_swa(0, regs, isr);
		if (!status)
			return;
		// failed to fetch the instruction bundle
		if (IA64_RETRY == status)
			return;
		printk("ia64_handle_reflection: handling FP trap\n");
		vector = IA64_FP_TRAP_VECTOR;
		break;
	case 34:
		if (isr & (1UL << 4))
			printk("ia64_handle_reflection: handling "
			       "unimplemented instruction address %s\n",
			       (isr & (1UL << 32)) ? "fault" : "trap");
		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
		break;
	case 35:
		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
		if (debugger_trap_entry(vector, regs))
			return;
		break;
	case 36:
		vector = IA64_SINGLE_STEP_TRAP_VECTOR;
		if (debugger_trap_entry(vector, regs))
			return;
		break;

	default:
		panic_domain(regs, "ia64_handle_reflection: "
		             "unhandled vector=0x%lx\n", vector);
		return;
	}
	if (check_lazy_cover && (isr & IA64_ISR_IR) &&
	    handle_lazy_cover(v, regs))
		return;
	PSCB(current, ifa) = ifa;
	PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
	reflect_interruption(isr, regs, vector);
}
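
/*
 * Dirty-bit (shadow) fault while log-dirty mode is active: mark the
 * page dirty in the shadow bitmap, then either fix up the VHPT entry
 * locally or reflect the dirty-bit fault to the guest.
 */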
void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct domain *d = current->domain;
	unsigned long gpfn;
	unsigned long pte = 0;
	struct vhpt_lf_entry *vlfe;

	/*
	 * v->arch.vhpt_pg_shift shouldn't be used here.
	 * Currently the dirty page logging bitmap is allocated based
	 * on PAGE_SIZE.  This is part of the xen_domctl_shadow_op ABI.
	 * If we want to log dirty pages at a finer granularity when
	 * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to
	 * revise the ABI and update this function and the related
	 * tool stack (live relocation).
	 */
	unsigned long vhpt_pg_shift = PAGE_SHIFT;
	/* There are 2 jobs to do:
	   - marking the page as dirty (the metaphysical address must be
	     extracted to do that).
	   - reflecting or not the fault (the virtual Dirty bit must be
	     extracted to decide).
	   Unfortunately, this information is not immediately available!  */
	/* Extract the metaphysical address.
	   Try to get it from VHPT and M2P as we need the flags.  */
	vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
	pte = vlfe->page_flags;
	if (vlfe->ti_tag == ia64_ttag(ifa)) {
		/* The VHPT entry is valid.  */
		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
		                         vhpt_pg_shift);
		BUG_ON(gpfn == INVALID_M2P_ENTRY);
	} else {
		unsigned long itir, iha;
		IA64FAULT fault;

		/* The VHPT entry is not valid.  */
		vlfe = NULL;

		/* FIXME: gives a chance to tpa, as the TC was valid.  */

		fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

		/* Try again!  */
		if (fault != IA64_NO_FAULT) {
			/* This will trigger a dtlb miss.  */
			ia64_ptcl(ifa, vhpt_pg_shift << 2);
			return;
		}
		gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
		if (pte & _PAGE_D)
			pte |= _PAGE_VIRT_D;
	}
	/* Set the dirty bit in the bitmap.  */
	shadow_mark_page_dirty(d, gpfn);

	/* Update the local TC/VHPT and decide whether or not the fault
	   should be reflected.
	   SMP note: we almost ignore the other processors.  The shadow_bitmap
	   has been atomically updated.  If the dirty fault happens on another
	   processor, it will do its job.  */
	if (pte != 0) {
		/* We will know how to handle the fault.  */

		if (pte & _PAGE_VIRT_D) {
			/* Rewrite the VHPT entry.
			   There is no race here because only the
			   cpu VHPT owner can write page_flags.  */
			if (vlfe)
				vlfe->page_flags = pte | _PAGE_D;

			/* Purge the TC locally.
			   It will be reloaded from the VHPT iff the
			   VHPT entry is still valid.  */
			ia64_ptcl(ifa, vhpt_pg_shift << 2);

			atomic64_inc(&d->arch.shadow_fault_count);
		} else {
			/* Reflect.
			   In this case there is no need to purge.  */
			ia64_handle_reflection(ifa, regs, isr, 0, 8);
		}
	} else {
		/* We don't know whether or not the fault must be
		   reflected.  The VHPT entry is not valid.  */
		/* FIXME: in metaphysical mode, we could do an ITC now.  */
		ia64_ptcl(ifa, vhpt_pg_shift << 2);
	}
}
819 }