ia64/xen-unstable

view xen/arch/ia64/xen/faults.c @ 12669:76d379e3f1d7

[IA64] Check CPL for break 0x80001 case

This patch fixes an ltrace problem: running the command "ltrace ps" on
dom0 hangs dom0. The cause is that "break 0x80001" is shared by
ltrace (CPL=3) and hpsim (CPL=2). To avoid this, a CPL check is added
to the ia64_break_fault code.
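
In code terms, the change amounts to a guard of this shape (a minimal
sketch of just the check, mirroring the full dispatch in ia64_handle_break
below):

    /* Accept the hpsim SSC breaks only from the guest kernel (CPL 2); a
     * CPL 3 break with the same immediate, such as ltrace's 0x80001, falls
     * through and is reflected to the domain instead of being treated as
     * an SSC. */
    if ((iim == 0x80001 || iim == 0x80002)
        && ia64_get_cpl(regs->cr_ipsr) == 2) {
            do_ssc(vcpu_get_gr(current, 36), regs);
    }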

Signed-off-by: Atsushi SAKAI <sakaia@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Fri Dec 01 11:12:00 2006 -0700 (2006-12-01)
parents d6e40274f923
children cf23494af72c

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations be? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I |                  \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |   \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB |   \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS |  \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |   \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// should never panic domain... if it does, stack may have been overrun
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs,
                                   unsigned long vector)
{
        struct vcpu *v = current;

        if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
                panic_domain(regs,
                             "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
                panic_domain(regs, "psr.ic off, delivering fault=%lx,"
                             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
                             isr, PSCB(v, iip));
        }
}
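
/*
 * Deliver an interruption to the guest by emulating what the processor
 * does on interruption delivery: save the interruption state (ipsr, isr,
 * iip, ifs) into the PSCB, switch to register bank 0, mask event upcalls
 * and interrupt collection, and redirect execution to the handler at the
 * guest's IVA plus the vector offset.
 */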
void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                          unsigned long vector)
{
        struct vcpu *v = current;

        if (!PSCB(v, interrupt_collection_enabled))
                check_bad_nested_interruption(isr, regs, vector);
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        vcpu_bsw0(v);
        PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;
        PSCB(v, incomplete_regframe) = 0;

        regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        perfc_incra(slow_reflect, vector >> 8);
}

static unsigned long pending_false_positive = 0;

void reflect_extint(struct pt_regs *regs)
{
        unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
        struct vcpu *v = current;
        static int first_extint = 1;

        if (first_extint) {
                printk("Delivering first extint to domain: isr=0x%lx, "
                       "iip=0x%lx\n", isr, regs->cr_iip);
                first_extint = 0;
        }
        if (vcpu_timer_pending_early(v))
                printk("*#*#*#* about to deliver early timer to domain %d!!\n",
                       v->domain->domain_id);
        PSCB(current, itir) = 0;
        reflect_interruption(isr, regs, IA64_EXTINT_VECTOR);
}
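
/*
 * Deliver a pending event-channel upcall: rather than vectoring through
 * the guest's IVA, jump directly to the vcpu's registered
 * event_callback_ip, with the same PSCB state save as reflect_interruption.
 */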
void reflect_event(struct pt_regs *regs)
{
        unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
        struct vcpu *v = current;

        /* Sanity check */
        if (is_idle_vcpu(v) || !user_mode(regs)) {
                //printk("WARN: invocation to reflect_event in nested xen\n");
                return;
        }

        if (!event_pending(v))
                return;

        if (!PSCB(v, interrupt_collection_enabled))
                printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
                       "isr=%lx,viip=0x%lx\n",
                       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        vcpu_bsw0(v);
        PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;
        PSCB(v, incomplete_regframe) = 0;

        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        // FIXME: Will this work properly if doing an RFI???
        if (!is_idle_domain(d) && user_mode(regs)) {
                if (vcpu_deliverable_interrupts(v))
                        reflect_extint(regs);
                else if (PSCB(v, pending_interruption))
                        ++pending_false_positive;
        }
}
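
/*
 * "Lazy cover": if the guest faulted with interrupt collection off, stash
 * cr.ifs in the PSCB, mark the register frame incomplete, and clear cr.ifs
 * so the faulting instruction can be retried.
 */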
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
        if (!PSCB(v, interrupt_collection_enabled)) {
                PSCB(v, ifs) = regs->cr_ifs;
                PSCB(v, incomplete_regframe) = 1;
                regs->cr_ifs = 0;
                perfc_incrc(lazy_cover);
                return 1;       // retry same instruction with cr.ifs off
        }
        return 0;
}
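
/*
 * Page fault path: translate the faulting guest virtual address through
 * the vcpu's TLB/VHPT state (vcpu_translate) and insert the machine
 * translation; faults taken inside Xen itself go through the exception
 * table, everything else is reflected to the guest.
 */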
void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
        unsigned long iip = regs->cr_iip, iha;
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
        int is_ptc_l_needed = 0;
        u64 logps;

        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA)
                && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to a speculative load or lfetch.fault,
                 * set the "ed" bit in the psr to ensure forward progress.
                 * (Target register will get a NaT for ld.s, lfetch will be
                 * canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

 again:
        fault = vcpu_translate(current, address, is_data, &pteval,
                               &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                struct p2m_entry entry;
                unsigned long m_pteval;
                m_pteval = translate_domain_pte(pteval, address, itir,
                                                &logps, &entry);
                vcpu_itc_no_srlz(current, (is_data ? 2 : 1) | 4,
                                 address, m_pteval, pteval, logps, &entry);
                if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
                    p2m_entry_retry(&entry)) {
                        /* dtlb has been purged in-between. This dtlb was
                           matching. Undo the work. */
                        vcpu_flush_tlb_vhpt_range(address, logps);

                        // The stale entry which we inserted above may remain
                        // in the TLB cache. We don't purge it now, hoping
                        // that the next itc purges it.
                        is_ptc_l_needed = 1;
                        goto again;
                }
                return;
        }

        if (is_ptc_l_needed)
                vcpu_ptc_l(current, address, logps);
        if (!user_mode(regs)) {
                /* The fault occurred inside Xen. */
                if (!ia64_done_with_exception(regs)) {
                        // should never happen. If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
                               " lookup failed, iip=0x%lx, addr=0x%lx, "
                               "spinning...\n", iip, address);
                        panic_domain(regs, "*** xen_handle_domain_access: "
                                     "exception table lookup failed, "
                                     "iip=0x%lx, addr=0x%lx, spinning...\n",
                                     iip, address);
                }
                return;
        }

        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
                return;

        if (!PSCB(current, interrupt_collection_enabled)) {
                check_bad_nested_interruption(isr, regs, fault);
                //printk("Delivering NESTED DATA TLB fault\n");
                fault = IA64_DATA_NESTED_TLB_VECTOR;
                regs->cr_iip =
                    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
                regs->cr_ipsr =
                    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
                // NOTE: nested trap must NOT pass PSCB address
                //regs->r31 = (unsigned long) &PSCB(current);
                perfc_incra(slow_reflect, fault >> 8);
                return;
        }

        PSCB(current, itir) = itir;
        PSCB(current, iha) = iha;
        PSCB(current, ifa) = address;
        reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void trap_init(void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a virtual address */
                fpswa_interface = __va(ia64_boot_param->fpswa);
        else
                printk("No FPSWA supported.\n");
}

static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return (fpswa_ret_t) {-1, 0, 0, 0};

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *)&regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long    trap_type,
         *      void             *Bundle,
         *      unsigned long    *pipsr,
         *      unsigned long    *pfsr,
         *      unsigned long    *pisr,
         *      unsigned long    *ppreds,
         *      unsigned long    *pifs,
         *      void             *fp_state);
         */
        ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
                                         ipsr, fpsr, isr, pr, ifs, &fp_state);

        return ret;
}

/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        struct vcpu *v = current;
        IA64_BUNDLE bundle;
        unsigned long fault_ip;
        fpswa_ret_t ret;

        fault_ip = regs->cr_iip;
        /*
         * When an FP trap occurs, the trapping instruction has completed.
         * If ipsr.ri == 0, the trapping instruction is in the previous
         * bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;

        if (VMX_DOMAIN(current)) {
                if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
                        return IA64_RETRY;
        } else
                bundle = __get_domain_bundle(fault_ip);

        if (!bundle.i64[0] && !bundle.i64[1]) {
                printk("%s: floating-point bundle at 0x%lx not mapped\n",
                       __FUNCTION__, fault_ip);
                return -1;
        }

        ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                         &isr, &regs->pr, &regs->cr_ifs, regs);

        if (ret.status) {
                PSCBX(v, fpswa_ret) = ret;
                printk("%s(%s): fp_emulate() returned %ld\n",
                       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
        }

        return ret.status;
}

void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
        struct pt_regs *regs = (struct pt_regs *)&stack;
        unsigned long code;
        static const char *const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6",
                "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10",
                "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
               "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
               regs->cr_iip, regs->cr_ipsr, isr);

        if ((isr & IA64_ISR_NA) &&
            ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the
                 * psr to cancel the lfetch.
                 */
                ia64_psr(regs)->ed = 1;
                printk("ia64_fault: handled lfetch.fault\n");
                return;
        }

        switch (vector) {
        case 0:
                printk("VHPT Translation.\n");
                break;

        case 4:
                printk("Alt DTLB.\n");
                break;

        case 6:
                printk("Instruction Key Miss.\n");
                break;

        case 7:
                printk("Data Key Miss.\n");
                break;

        case 8:
                printk("Dirty-bit.\n");
                break;

        case 20:
                printk("Page Not Found.\n");
                break;

        case 21:
                printk("Key Permission.\n");
                break;

        case 22:
                printk("Instruction Access Rights.\n");
                break;

        case 24:        /* General Exception */
                code = (isr >> 4) & 0xf;
                printk("General Exception: %s%s.\n", reason[code],
                       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                      " (data access)") : "");
                if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx "
                               "(pr = %016lx)\n", current->comm, current->pid,
                               regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
                        printk("ia64_fault: returning on hazard\n");
                        return;
                }
                break;

        case 25:
                printk("Disabled FP-Register.\n");
                break;

        case 26:
                printk("NaT consumption.\n");
                break;

        case 29:
                printk("Debug.\n");
                break;

        case 30:
                printk("Unaligned Reference.\n");
                break;

        case 31:
                printk("Unsupported data reference.\n");
                break;

        case 32:
                printk("Floating-Point Fault.\n");
                break;

        case 33:
                printk("Floating-Point Trap.\n");
                break;

        case 34:
                printk("Lower Privilege Transfer Trap.\n");
                break;

        case 35:
                printk("Taken Branch Trap.\n");
                break;

        case 36:
                printk("Single Step Trap.\n");
                break;

        case 45:
                printk("IA-32 Exception.\n");
                break;

        case 46:
                printk("IA-32 Intercept.\n");
                break;

        case 47:
                printk("IA-32 Interrupt.\n");
                break;

        default:
                printk("Fault %lu\n", vector);
                break;
        }

        show_registers(regs);
        panic("Fault in Xen.\n");
}

unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S */
int first_break = 0;
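
/*
 * Dispatch on the break immediate: the hpsim SSC breaks (0x80001/0x80002)
 * and the domain's hypercall break are accepted only at CPL 2, where the
 * paravirtualized guest kernel runs; a user-level break with the same
 * immediate (e.g. ltrace's 0x80001) is reflected back to the guest instead.
 */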
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        IA64FAULT vector;

        /* FIXME: don't hardcode constant */
        if ((iim == 0x80001 || iim == 0x80002)
            && ia64_get_cpl(regs->cr_ipsr) == 2) {
                do_ssc(vcpu_get_gr(current, 36), regs);
        }
#ifdef CRASH_DEBUG
        else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
                if (iim == 0)
                        show_registers(regs);
                debugger_trap_fatal(0 /* don't care */, regs);
        }
#endif
        else if (iim == d->arch.breakimm && ia64_get_cpl(regs->cr_ipsr) == 2) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;

                if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
                        if (!PSCBX(v, hypercall_continuation))
                                vcpu_increment_iip(current);
                } else
                        reflect_interruption(isr, regs, vector);
        } else if (!PSCB(v, interrupt_collection_enabled)) {
                if (ia64_hyperprivop(iim, regs))
                        vcpu_increment_iip(current);
        } else {
                if (iim == 0)
                        die_if_kernel("bug check", regs, iim);
                PSCB(v, iim) = iim;
                reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
        }
}

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
        IA64FAULT vector;

        vector = priv_emulate(current, regs, isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector to reflect that requires
                // iha/itir (e.g. vcpu_force_data_miss), they must be set there
                reflect_interruption(isr, regs, vector);
        }
}
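
/*
 * Map a hardware fault/trap taken while running the guest onto the vector
 * that should be reflected to it, handling the special cases (register NaT
 * consumption, FP assist via FPSWA, lazy cover) along the way.
 */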
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
        struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;
        unsigned long status;

        /* The following faults shouldn't be seen from Xen itself */
        BUG_ON(!(psr & IA64_PSR_CPL));

        switch (vector) {
        case 8:
                vector = IA64_DIRTY_BIT_VECTOR;
                break;
        case 9:
                vector = IA64_INST_ACCESS_BIT_VECTOR;
                break;
        case 10:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_BIT_VECTOR;
                break;
        case 20:
                check_lazy_cover = 1;
                vector = IA64_PAGE_NOT_PRESENT_VECTOR;
                break;
        case 22:
                vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
                break;
        case 23:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
                break;
        case 25:
                vector = IA64_DISABLED_FPREG_VECTOR;
                break;
        case 26:
                if (((isr >> 4L) & 0xfL) == 1) {
                        /* Fault is due to a register NaT consumption fault. */
                        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
                        printk("ia64_handle_reflection: handling regNaT "
                               "fault\n");
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#if 1
                // pass null pointer dereferences through with no error
                // but retain debug output for non-zero ifa
                if (!ifa) {
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#endif
#ifdef CONFIG_PRIVIFY
                /* Some privified operations are coded using reg+64 instead
                   of reg. */
                printk("*** NaT fault... attempting to handle as privop\n");
                printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v, regs, isr);
                if (vector == IA64_NO_FAULT) {
                        printk("*** Handled privop masquerading as NaT "
                               "fault\n");
                        return;
                }
#endif
                vector = IA64_NAT_CONSUMPTION_VECTOR;
                break;
        case 27:
                //printk("*** Handled speculation vector, itc=%lx!\n",
                //       ia64_get_itc());
                PSCB(current, iim) = iim;
                vector = IA64_SPECULATION_VECTOR;
                break;
        case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR;
                break;
        case 32:
                status = handle_fpu_swa(1, regs, isr);
                if (!status) {
                        vcpu_increment_iip(v);
                        return;
                }
                // fetching the code bundle failed
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP fault\n");
                vector = IA64_FP_FAULT_VECTOR;
                break;
        case 33:
                status = handle_fpu_swa(0, regs, isr);
                if (!status)
                        return;
                // fetching the code bundle failed
                if (IA64_RETRY == status)
                        return;
                printk("ia64_handle_reflection: handling FP trap\n");
                vector = IA64_FP_TRAP_VECTOR;
                break;
        case 34:
                printk("ia64_handle_reflection: handling lowerpriv trap\n");
                vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
                break;
        case 35:
                printk("ia64_handle_reflection: handling taken branch trap\n");
                vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
                break;
        case 36:
                printk("ia64_handle_reflection: handling single step trap\n");
                vector = IA64_SINGLE_STEP_TRAP_VECTOR;
                break;

        default:
                printk("ia64_handle_reflection: unhandled vector=0x%lx\n",
                       vector);
                while (vector)
                        /* spin */;
                return;
        }
        if (check_lazy_cover && (isr & IA64_ISR_IR) &&
            handle_lazy_cover(v, regs))
                return;
        PSCB(current, ifa) = ifa;
        PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
        reflect_interruption(isr, regs, vector);
}

void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
        struct vcpu *v = current;
        struct domain *d = current->domain;
        unsigned long gpfn;
        unsigned long pte = 0;
        struct vhpt_lf_entry *vlfe;

        /* There are 2 jobs to do:
           - marking the page as dirty (the metaphysical address must be
             extracted to do that).
           - reflecting or not the fault (the virtual Dirty bit must be
             extracted to decide).
           Unfortunately this information is not immediately available!  */

        /* Extract the metaphysical address.
           Try to get it from the VHPT and the M2P as we need the flags.  */
        vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
        pte = vlfe->page_flags;
        if (vlfe->ti_tag == ia64_ttag(ifa)) {
                /* The VHPT entry is valid.  */
                gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
                BUG_ON(gpfn == INVALID_M2P_ENTRY);
        } else {
                unsigned long itir, iha;
                IA64FAULT fault;

                /* The VHPT entry is not valid.  */
                vlfe = NULL;

                /* FIXME: gives a chance to tpa, as the TC was valid.  */

                fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

                /* Try again!  */
                if (fault != IA64_NO_FAULT) {
                        /* This will trigger a dtlb miss.  */
                        ia64_ptcl(ifa, PAGE_SHIFT << 2);
                        return;
                }
                gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
                if (pte & _PAGE_D)
                        pte |= _PAGE_VIRT_D;
        }

        /* Set the dirty bit in the bitmap.  */
        shadow_mark_page_dirty(d, gpfn);

        /* Update the local TC/VHPT and decide whether or not the fault
           should be reflected.
           SMP note: we almost ignore the other processors.  The shadow_bitmap
           has been atomically updated.  If the dirty fault happens on another
           processor, it will do its job.  */

        if (pte != 0) {
                /* We know how to handle the fault.  */

                if (pte & _PAGE_VIRT_D) {
                        /* Rewrite the VHPT entry.
                           There is no race here because only the
                           cpu VHPT owner can write page_flags.  */
                        if (vlfe)
                                vlfe->page_flags = pte | _PAGE_D;

                        /* Purge the TC locally.
                           It will be reloaded from the VHPT iff the
                           VHPT entry is still valid.  */
                        ia64_ptcl(ifa, PAGE_SHIFT << 2);

                        atomic64_inc(&d->arch.shadow_fault_count);
                } else {
                        /* Reflect.
                           In this case there is no need to purge.  */
                        ia64_handle_reflection(ifa, regs, isr, 0, 8);
                }
        } else {
                /* We don't know whether or not the fault must be reflected.
                   The VHPT entry is not valid.  */
                /* FIXME: in metaphysical mode, we could do an ITC now.  */
                ia64_ptcl(ifa, PAGE_SHIFT << 2);
        }
}