ia64/xen-unstable

xen/arch/ia64/xen/faults.c @ 10938:bfc69471550e

[IA64] fix a fetch code bug

The fetch-code path may fail if there is no corresponding TLB entry
in the THASH-VTLB. This patch adds a retry mechanism to resolve
the issue.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Aug 09 08:01:52 2006 -0600 (2006-08-09)
parents 7cde0d938ef4
children 7c79d49033c6
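
In outline, the retry mechanism works as sketched below (simplified from
handle_fpu_swa and ia64_handle_reflection in this file, not the literal
patch hunks): when the bundle fetch misses the THASH-VTLB, the failure is
propagated up as IA64_RETRY and the handler returns without reflecting,
so the faulting instruction re-executes and the fetch is retried.

    /* in handle_fpu_swa() */
    if (VMX_DOMAIN(current)) {
        if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
            return IA64_RETRY;  /* no VTLB entry for the bundle */
    }

    /* in ia64_handle_reflection(), cases 32/33 */
    status = handle_fpu_swa(1, regs, isr);
    if (IA64_RETRY == status)
        return;  /* bundle fetch failed: retry the instruction */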
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// should never panic domain... if it does, stack may have been overrun
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
        panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
    }
    vector &= ~0xf;
    if (vector != IA64_DATA_TLB_VECTOR &&
        vector != IA64_ALT_DATA_TLB_VECTOR &&
        vector != IA64_VHPT_TRANS_VECTOR) {
        panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                     vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
    }
}
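
/*
 * Deliver a fault/interruption to the guest: save the interruption
 * state (ipsr, isr, iip, ifs) into the vcpu's shared PSCB area, switch
 * to register bank 0, and redirect execution to the matching entry of
 * the guest's IVT with event delivery and collection masked.
 */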
void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!PSCB(v,interrupt_collection_enabled))
        check_bad_nested_interruption(isr,regs,vector);
    PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    PSCB(v,isr) = isr;
    PSCB(v,iip) = regs->cr_iip;
    PSCB(v,ifs) = 0;
    PSCB(v,incomplete_regframe) = 0;

    regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

    v->vcpu_info->evtchn_upcall_mask = 1;
    PSCB(v,interrupt_collection_enabled) = 0;

    perfc_incra(slow_reflect, vector >> 8);
}

static unsigned long pending_false_positive = 0;
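
/* Reflect a pending external interrupt to the current domain through
   its external-interrupt IVT entry.  */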
void reflect_extint(struct pt_regs *regs)
{
    unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    struct vcpu *v = current;
    static int first_extint = 1;

    if (first_extint) {
        printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
        first_extint = 0;
    }
    if (vcpu_timer_pending_early(v))
        printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
    PSCB(current,itir) = 0;
    reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
}
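
/*
 * Deliver an event-channel upcall: like reflect_interruption(), but
 * control transfers to the registered event_callback_ip rather than to
 * an IVT entry.
 */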
void reflect_event(struct pt_regs *regs)
{
    unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    struct vcpu *v = current;

    /* Sanity check */
    if (is_idle_vcpu(v) || !user_mode(regs)) {
        //printk("WARN: invocation to reflect_event in nested xen\n");
        return;
    }

    if (!event_pending(v))
        return;

    if (!PSCB(v,interrupt_collection_enabled))
        printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
               regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
    PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    PSCB(v,isr) = isr;
    PSCB(v,iip) = regs->cr_iip;
    PSCB(v,ifs) = 0;
    PSCB(v,incomplete_regframe) = 0;

    regs->cr_iip = v->arch.event_callback_ip;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

    v->vcpu_info->evtchn_upcall_mask = 1;
    PSCB(v,interrupt_collection_enabled) = 0;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d) && user_mode(regs)) {
        if (vcpu_deliverable_interrupts(v))
            reflect_extint(regs);
        else if (PSCB(v,pending_interruption))
            ++pending_false_positive;
    }
}
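
/*
 * Lazy "cover" emulation: if the guest faulted with psr.ic off, stash
 * cr.ifs in the PSCB, clear it, and let the instruction be retried.
 * Returns 1 if the fault was handled this way.
 */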
static int
handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
    if (!PSCB(v,interrupt_collection_enabled)) {
        PSCB(v,ifs) = regs->cr_ifs;
        PSCB(v,incomplete_regframe) = 1;
        regs->cr_ifs = 0;
        perfc_incrc(lazy_cover);
        return 1;  // retry same instruction with cr.ifs off
    }
    return 0;
}
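
/*
 * TLB/VHPT miss handler.  Translate the address through the vcpu's
 * VTLB; on success insert the translation into the machine TLB/VHPT,
 * redoing the translation if a concurrent purge invalidated it.
 * Otherwise reflect the miss to the guest, or resolve a Xen-internal
 * fault via the exception table.
 */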
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    unsigned long iip = regs->cr_iip, iha;
    // FIXME should validate address here
    unsigned long pteval;
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    IA64FAULT fault;
    int is_ptc_l_needed = 0;
    u64 logps;

    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault, set the "ed"
         * bit in the psr to ensure forward progress.  (Target register will get a
         * NaT for ld.s, lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }

 again:
    fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
    if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
        struct p2m_entry entry;
        pteval = translate_domain_pte(pteval, address, itir, &logps, &entry);
        vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
        if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
            p2m_entry_retry(&entry)) {
            /* dtlb has been purged in-between.  This dtlb was
               matching.  Undo the work.  */
            vcpu_flush_tlb_vhpt_range(address, logps);

            // The stale entry we inserted above may remain in the
            // TLB cache.  We don't purge it now, hoping the next
            // itc purges it.
            is_ptc_l_needed = 1;
            goto again;
        }
        return;
    }

    if (is_ptc_l_needed)
        vcpu_ptc_l(current, address, logps);
    if (!user_mode (regs)) {
        /* The fault occurs inside Xen.  */
        if (!ia64_done_with_exception(regs)) {
            // should never happen.  If it does, region 0 addr may
            // indicate a bad xen pointer
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                   iip, address);
            panic_domain(regs,"*** xen_handle_domain_access: exception table"
                         " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                         iip, address);
        }
        return;
    }

    if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
        return;

    if (!PSCB(current,interrupt_collection_enabled)) {
        check_bad_nested_interruption(isr,regs,fault);
        //printf("Delivering NESTED DATA TLB fault\n");
        fault = IA64_DATA_NESTED_TLB_VECTOR;
        regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        // NOTE: nested trap must NOT pass PSCB address
        //regs->r31 = (unsigned long) &PSCB(current);
        perfc_incra(slow_reflect, fault >> 8);
        return;
    }

    PSCB(current,itir) = itir;
    PSCB(current,iha) = iha;
    PSCB(current,ifa) = address;
    reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void trap_init (void)
{
    if (ia64_boot_param->fpswa)
        /* FPSWA fixup: make the interface pointer a virtual address: */
        fpswa_interface = __va(ia64_boot_param->fpswa);
    else
        printk("No FPSWA supported.\n");
}
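
/*
 * Thin wrapper around the EFI FPSWA (floating-point software assist)
 * entry point.  Only f6-f11 are passed in fp_state, as those are the
 * only floating-point registers saved in pt_regs.
 */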
static fpswa_ret_t
fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
            unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
            unsigned long *ifs, struct pt_regs *regs)
{
    fp_state_t fp_state;
    fpswa_ret_t ret;

    if (!fpswa_interface)
        return ((fpswa_ret_t) {-1, 0, 0, 0});

    memset(&fp_state, 0, sizeof(fp_state_t));

    /*
     * compute fp_state.  only FP registers f6 - f11 are used by the
     * kernel, so set those bits in the mask and set the low volatile
     * pointer to point to these registers.
     */
    fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

    fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
    /*
     * unsigned long (*EFI_FPSWA) (
     *      unsigned long    trap_type,
     *      void             *Bundle,
     *      unsigned long    *pipsr,
     *      unsigned long    *pfsr,
     *      unsigned long    *pisr,
     *      unsigned long    *ppreds,
     *      unsigned long    *pifs,
     *      void             *fp_state);
     */
    ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
                                    ipsr, fpsr, isr, pr, ifs, &fp_state);

    return ret;
}

/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
    struct vcpu *v = current;
    IA64_BUNDLE bundle;
    unsigned long fault_ip;
    fpswa_ret_t ret;

    fault_ip = regs->cr_iip;
    /*
     * When the FP trap occurs, the trapping instruction is completed.
     * If ipsr.ri == 0, the trapping instruction is in the previous bundle.
     */
    if (!fp_fault && (ia64_psr(regs)->ri == 0))
        fault_ip -= 16;

    if (VMX_DOMAIN(current)) {
        if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
            return IA64_RETRY;
    }
    else
        bundle = __get_domain_bundle(fault_ip);

    if (!bundle.i64[0] && !bundle.i64[1]) {
        printk("%s: floating-point bundle at 0x%lx not mapped\n",
               __FUNCTION__, fault_ip);
        return -1;
    }

    ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                     &isr, &regs->pr, &regs->cr_ifs, regs);

    if (ret.status) {
        PSCBX(v, fpswa_ret) = ret;
        printk("%s(%s): fp_emulate() returned %ld\n",
               __FUNCTION__, fp_fault?"fault":"trap", ret.status);
    }

    return ret.status;
}
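
/*
 * Faults taken by Xen itself land here: print diagnostics and panic,
 * except for the recoverable lfetch.fault and hazard cases, which just
 * return.
 */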
void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
            unsigned long iim, unsigned long itir, unsigned long arg5,
            unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code;
    static const char * const reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6",
        "Unknown fault 7", "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10",
        "Unknown fault 11", "Unknown fault 12",
        "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
    };

    printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
           vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
         * the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
        case 0:
            printk("VHPT Translation.\n");
            break;

        case 4:
            printk("Alt DTLB.\n");
            break;

        case 6:
            printk("Instruction Key Miss.\n");
            break;

        case 7:
            printk("Data Key Miss.\n");
            break;

        case 8:
            printk("Dirty-bit.\n");
            break;

        case 20:
            printk("Page Not Found.\n");
            break;

        case 21:
            printk("Key Permission.\n");
            break;

        case 22:
            printk("Instruction Access Rights.\n");
            break;

        case 24: /* General Exception */
            code = (isr >> 4) & 0xf;
            printk("General Exception: %s%s.\n", reason[code],
                   (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                  " (data access)") : "");
            if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
                printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                       current->comm, current->pid,
                       regs->cr_iip + ia64_psr(regs)->ri,
                       regs->pr);
# endif
                printf("ia64_fault: returning on hazard\n");
                return;
            }
            break;

        case 25:
            printk("Disabled FP-Register.\n");
            break;

        case 26:
            printk("NaT consumption.\n");
            break;

        case 29:
            printk("Debug.\n");
            break;

        case 30:
            printk("Unaligned Reference.\n");
            break;

        case 31:
            printk("Unsupported data reference.\n");
            break;

        case 32:
            printk("Floating-Point Fault.\n");
            break;

        case 33:
            printk("Floating-Point Trap.\n");
            break;

        case 34:
            printk("Lower Privilege Transfer Trap.\n");
            break;

        case 35:
            printk("Taken Branch Trap.\n");
            break;

        case 36:
            printk("Single Step Trap.\n");
            break;

        case 45:
            printk("IA-32 Exception.\n");
            break;

        case 46:
            printk("IA-32 Intercept.\n");
            break;

        case 47:
            printk("IA-32 Interrupt.\n");
            break;

        default:
            printk("Fault %lu\n", vector);
            break;
    }

    show_registers(regs);
    panic("Fault in Xen.\n");
}

unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S  */
int first_break = 0;
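
/*
 * break instruction handler: dispatch SSC requests from the simulator,
 * debugger breaks, hypercalls (iim == d->arch.breakimm), hyperprivops
 * (when the guest's psr.ic is off), or reflect the break to the guest.
 */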
void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    IA64FAULT vector;

    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        do_ssc(vcpu_get_gr(current,36), regs);
    }
#ifdef CRASH_DEBUG
    else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    }
#endif
    else if (iim == d->arch.breakimm) {
        /* by default, do not continue */
        v->arch.hypercall_continuation = 0;

        if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
            if (!PSCBX(v, hypercall_continuation))
                vcpu_increment_iip(current);
        }
        else
            reflect_interruption(isr, regs, vector);
    }
    else if (!PSCB(v,interrupt_collection_enabled)) {
        if (ia64_hyperprivop(iim,regs))
            vcpu_increment_iip(current);
    }
    else {
        if (iim == 0)
            die_if_kernel("bug check", regs, iim);
        PSCB(v,iim) = iim;
        reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
    }
}
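
/*
 * Privileged-operation fault: try to emulate the instruction; reflect
 * the fault to the guest if emulation does not fully handle it.
 */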
void
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;

    vector = priv_emulate(current,regs,isr);
    if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        // Note: if a path results in a vector to reflect that requires
        // iha/itir (e.g. vcpu_force_data_miss), they must be set there
        reflect_interruption(isr,regs,vector);
    }
}
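
/*
 * Translate a hardware interruption vector number into the guest IVT
 * offset and reflect it, with special-casing for NaT consumption and
 * the FPSWA-assisted floating-point fault/trap paths (which may return
 * IA64_RETRY when the bundle fetch misses the VTLB).
 */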
void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct vcpu *v = current;
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;
    unsigned long status;

    /* The following faults shouldn't be seen from Xen itself */
    BUG_ON (!(psr & IA64_PSR_CPL));

    switch(vector) {
        case 8:
            vector = IA64_DIRTY_BIT_VECTOR; break;
        case 9:
            vector = IA64_INST_ACCESS_BIT_VECTOR; break;
        case 10:
            check_lazy_cover = 1;
            vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
        case 20:
            check_lazy_cover = 1;
            vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
        case 22:
            vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
        case 23:
            check_lazy_cover = 1;
            vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
        case 25:
            vector = IA64_DISABLED_FPREG_VECTOR;
            break;
        case 26:
            if (((isr >> 4L) & 0xfL) == 1) {
                /* Fault is due to a register NaT consumption fault. */
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
                printf("ia64_handle_reflection: handling regNaT fault\n");
                vector = IA64_NAT_CONSUMPTION_VECTOR; break;
            }
#if 1
            // pass null pointer dereferences through with no error
            // but retain debug output for non-zero ifa
            if (!ifa) {
                vector = IA64_NAT_CONSUMPTION_VECTOR; break;
            }
#endif
#ifdef CONFIG_PRIVIFY
            /* Some privified operations are coded using reg+64 instead
               of reg.  */
            printf("*** NaT fault... attempting to handle as privop\n");
            printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                   isr, ifa, regs->cr_iip, psr);
            //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
            // certain NaT faults are higher priority than privop faults
            vector = priv_emulate(v,regs,isr);
            if (vector == IA64_NO_FAULT) {
                printf("*** Handled privop masquerading as NaT fault\n");
                return;
            }
#endif
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        case 27:
            //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
            PSCB(current,iim) = iim;
            vector = IA64_SPECULATION_VECTOR; break;
        case 30:
            // FIXME: Should we handle unaligned refs in Xen??
            vector = IA64_UNALIGNED_REF_VECTOR; break;
        case 32:
            status = handle_fpu_swa(1, regs, isr);
            if (!status) {
                vcpu_increment_iip(v);
                return;
            }
            // bundle fetch failed; return so the fault is retried
            if (IA64_RETRY == status)
                return;
            printf("ia64_handle_reflection: handling FP fault\n");
            vector = IA64_FP_FAULT_VECTOR; break;
        case 33:
            status = handle_fpu_swa(0, regs, isr);
            if (!status)
                return;
            // bundle fetch failed; return so the trap is retried
            if (IA64_RETRY == status)
                return;
            printf("ia64_handle_reflection: handling FP trap\n");
            vector = IA64_FP_TRAP_VECTOR; break;
        case 34:
            printf("ia64_handle_reflection: handling lowerpriv trap\n");
            vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
        case 35:
            printf("ia64_handle_reflection: handling taken branch trap\n");
            vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
        case 36:
            printf("ia64_handle_reflection: handling single step trap\n");
            vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;

        default:
            printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
            while(vector);
            return;
    }
    if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs))
        return;
    PSCB(current,ifa) = ifa;
    PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
    reflect_interruption(isr,regs,vector);
}
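
/*
 * Dirty-bit fault taken while log-dirty (shadow) mode is active: mark
 * the page dirty in the bitmap and decide whether the fault must also
 * be reflected to the guest.
 */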
void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
    struct vcpu *v = current;
    struct domain *d = current->domain;
    unsigned long gpfn;
    unsigned long pte = 0;
    struct vhpt_lf_entry *vlfe;

    /* There are 2 jobs to do:
       - marking the page as dirty (the metaphysical address must be
         extracted to do that).
       - reflecting or not the fault (the virtual Dirty bit must be
         extracted to decide).
       Unfortunately this information is not immediately available!  */

    /* Extract the metaphysical address.
       Try to get it from VHPT and M2P as we need the flags.  */
    vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
    pte = vlfe->page_flags;
    if (vlfe->ti_tag == ia64_ttag(ifa)) {
        /* The VHPT entry is valid.  */
        gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
        BUG_ON(gpfn == INVALID_M2P_ENTRY);
    }
    else {
        unsigned long itir, iha;
        IA64FAULT fault;

        /* The VHPT entry is not valid.  */
        vlfe = NULL;

        /* FIXME: gives a chance to tpa, as the TC was valid.  */

        fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

        /* Try again!  */
        if (fault != IA64_NO_FAULT) {
            /* This will trigger a dtlb miss.  */
            ia64_ptcl(ifa, PAGE_SHIFT << 2);
            return;
        }
        gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
        if (pte & _PAGE_D)
            pte |= _PAGE_VIRT_D;
    }

    /* Set the dirty bit in the bitmap.  */
    shadow_mark_page_dirty (d, gpfn);

    /* Update the local TC/VHPT and decide whether or not the fault
       should be reflected.
       SMP note: we almost ignore the other processors.  The
       shadow_bitmap has been atomically updated.  If the dirty fault
       happens on another processor, it will do its job.  */

    if (pte != 0) {
        /* We will know how to handle the fault.  */

        if (pte & _PAGE_VIRT_D) {
            /* Rewrite the VHPT entry.
               There is no race here because only the
               cpu VHPT owner can write page_flags.  */
            if (vlfe)
                vlfe->page_flags = pte | _PAGE_D;

            /* Purge the TC locally.
               It will be reloaded from the VHPT iff the
               VHPT entry is still valid.  */
            ia64_ptcl(ifa, PAGE_SHIFT << 2);

            atomic64_inc(&d->arch.shadow_fault_count);
        }
        else {
            /* Reflect.
               In this case there is no need to purge.  */
            ia64_handle_reflection(ifa, regs, isr, 0, 8);
        }
    }
    else {
        /* We don't know whether or not the fault must be
           reflected.  The VHPT entry is not valid.  */
        /* FIXME: in metaphysical mode, we could do an ITC now.  */
        ia64_ptcl(ifa, PAGE_SHIFT << 2);
    }
}
749 }