ia64/xen-unstable

xen/arch/ia64/xen/faults.c @ 10929:7cde0d938ef4

[IA64] convert more privop_stat to perfc

Convert most privop stats to perfc.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Aug 04 09:02:43 2006 -0600 (2006-08-04)
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations be? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// Note: IA64_PSR_PK has been removed from the following; why is this necessary?
#define DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
			 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
			 IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
			 IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
			 IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
			 IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
			 IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
			 IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
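
/*
 * On reflection, the guest's PSR is rewritten as
 *	psr = (psr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
 * i.e. collection, interrupt delivery and translation are forced on while
 * single-step, debug and related bits are cleared, approximating what the
 * hardware itself does when delivering an interruption.
 */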
extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// Should never panic the domain... if it does, the stack may have been overrun.
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
	struct vcpu *v = current;

	if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
		panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
	}
	vector &= ~0xf;
	if (vector != IA64_DATA_TLB_VECTOR &&
	    vector != IA64_ALT_DATA_TLB_VECTOR &&
	    vector != IA64_VHPT_TRANS_VECTOR) {
		panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
			     vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
	}
}
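
/*
 * Reflect an interruption into the guest: save the interruption state in
 * the PSCB, switch to bank 0 registers, point cr.iip at the guest IVT
 * entry for 'vector' (the low byte of the offset is masked off below) and
 * mask event delivery until the guest turns collection back on.
 */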
void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
	struct vcpu *v = current;

	if (!PSCB(v,interrupt_collection_enabled))
		check_bad_nested_interruption(isr,regs,vector);
	PSCB(v,unat) = regs->ar_unat;	// not sure if this is really needed?
	PSCB(v,precover_ifs) = regs->cr_ifs;
	vcpu_bsw0(v);
	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
	PSCB(v,isr) = isr;
	PSCB(v,iip) = regs->cr_iip;
	PSCB(v,ifs) = 0;
	PSCB(v,incomplete_regframe) = 0;

	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
	regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

	v->vcpu_info->evtchn_upcall_mask = 1;
	PSCB(v,interrupt_collection_enabled) = 0;

	perfc_incra(slow_reflect, vector >> 8);
}

static unsigned long pending_false_positive = 0;

void reflect_extint(struct pt_regs *regs)
{
	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
	struct vcpu *v = current;
	static int first_extint = 1;

	if (first_extint) {
		printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
		first_extint = 0;
	}
	if (vcpu_timer_pending_early(v))
		printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
	PSCB(current,itir) = 0;
	reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
}
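
/*
 * Like reflect_extint(), but events are delivered through the registered
 * event callback entry point (v->arch.event_callback_ip) rather than
 * through the guest IVT.
 */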
void reflect_event(struct pt_regs *regs)
{
	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
	struct vcpu *v = current;

	/* Sanity check */
	if (is_idle_vcpu(v) || !user_mode(regs)) {
		//printk("WARN: invocation to reflect_event in nested xen\n");
		return;
	}

	if (!event_pending(v))
		return;

	if (!PSCB(v,interrupt_collection_enabled))
		printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
	PSCB(v,unat) = regs->ar_unat;	// not sure if this is really needed?
	PSCB(v,precover_ifs) = regs->cr_ifs;
	vcpu_bsw0(v);
	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
	PSCB(v,isr) = isr;
	PSCB(v,iip) = regs->cr_iip;
	PSCB(v,ifs) = 0;
	PSCB(v,incomplete_regframe) = 0;

	regs->cr_iip = v->arch.event_callback_ip;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
	regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

	v->vcpu_info->evtchn_upcall_mask = 1;
	PSCB(v,interrupt_collection_enabled) = 0;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	// FIXME: Will this work properly if doing an RFI???
	if (!is_idle_domain(d) && user_mode(regs)) {
		if (vcpu_deliverable_interrupts(v))
			reflect_extint(regs);
		else if (PSCB(v,pending_interruption))
			++pending_false_positive;
	}
}
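
/*
 * "Lazy cover": if the guest faulted with collection disabled, stash
 * cr.ifs in the PSCB, flag the register frame as incomplete and clear
 * cr.ifs, then retry the faulting instruction instead of reflecting.
 */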
static int
handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
	if (!PSCB(v,interrupt_collection_enabled)) {
		PSCB(v,ifs) = regs->cr_ifs;
		PSCB(v,incomplete_regframe) = 1;
		regs->cr_ifs = 0;
		perfc_incrc(lazy_cover);
		return(1);	// retry same instruction with cr.ifs off
	}
	return(0);
}
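
/*
 * Guest page fault: try to resolve the miss via vcpu_translate() and
 * insert the resulting machine mapping; on failure, reflect the fault
 * (or a nested-TLB fault when collection is disabled) to the guest.
 */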
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
	unsigned long iip = regs->cr_iip, iha;
	// FIXME: should validate address here
	unsigned long pteval;
	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
	IA64FAULT fault;
	int is_ptc_l_needed = 0;
	u64 logps;

	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the
		 * "ed" bit in the psr to ensure forward progress.  (The target
		 * register will get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

 again:
	fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
		struct p2m_entry entry;
		pteval = translate_domain_pte(pteval, address, itir, &logps, &entry);
		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
		    p2m_entry_retry(&entry)) {
			/* The dtlb was purged in the meantime and it was the
			   matching entry.  Undo the work.  */
			vcpu_flush_tlb_vhpt_range(address, logps);

			// The stale entry we inserted above may remain in the
			// TLB cache; we don't purge it now, hoping the next
			// itc will purge it.
			is_ptc_l_needed = 1;
			goto again;
		}
		return;
	}

	if (is_ptc_l_needed)
		vcpu_ptc_l(current, address, logps);
	if (!user_mode (regs)) {
		/* The fault occurred inside Xen. */
		if (!ia64_done_with_exception(regs)) {
			// Should never happen.  If it does, a region 0
			// address may indicate a bad xen pointer.
			printk("*** xen_handle_domain_access: exception table"
			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
			       iip, address);
			panic_domain(regs,"*** xen_handle_domain_access: exception table"
			             " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
			             iip, address);
		}
		return;
	}

	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
		return;

	if (!PSCB(current,interrupt_collection_enabled)) {
		check_bad_nested_interruption(isr,regs,fault);
		//printf("Delivering NESTED DATA TLB fault\n");
		fault = IA64_DATA_NESTED_TLB_VECTOR;
		regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
		// NOTE: nested trap must NOT pass PSCB address
		//regs->r31 = (unsigned long) &PSCB(current);
		perfc_incra(slow_reflect, fault >> 8);
		return;
	}

	PSCB(current,itir) = itir;
	PSCB(current,iha)  = iha;
	PSCB(current,ifa)  = address;
	reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void trap_init (void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a virtual address: */
		fpswa_interface = __va(ia64_boot_param->fpswa);
	else
		printk("No FPSWA supported.\n");
}
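
/*
 * Call into the EFI FPSWA (floating-point software assist) firmware to
 * emulate the faulting FP operation.  Only f6-f11 are live in Xen's
 * pt_regs, so only those registers are made visible through fp_state.
 */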
static fpswa_ret_t
fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
            unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
            unsigned long *ifs, struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return ((fpswa_ret_t) {-1, 0, 0, 0});

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * compute fp_state.  only FP registers f6 - f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *      unsigned long    trap_type,
	 *      void             *Bundle,
	 *      unsigned long    *pipsr,
	 *      unsigned long    *pfsr,
	 *      unsigned long    *pisr,
	 *      unsigned long    *ppreds,
	 *      unsigned long    *pifs,
	 *      void             *fp_state);
	 */
	ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
	                                ipsr, fpsr, isr, pr, ifs, &fp_state);

	return ret;
}

/*
 * Handle floating-point assist faults and traps for a domain.
 */
unsigned long
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	struct vcpu *v = current;
	IA64_BUNDLE bundle;
	unsigned long fault_ip;
	fpswa_ret_t ret;

	fault_ip = regs->cr_iip;
	/*
	 * When an FP trap occurs, the trapping instruction has already
	 * completed.  If ipsr.ri == 0, the trapping instruction is in the
	 * previous bundle.
	 */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;

	if (VMX_DOMAIN(current))
		bundle = __vmx_get_domain_bundle(fault_ip);
	else
		bundle = __get_domain_bundle(fault_ip);

	if (!bundle.i64[0] && !bundle.i64[1]) {
		printk("%s: floating-point bundle at 0x%lx not mapped\n",
		       __FUNCTION__, fault_ip);
		return -1;
	}

	ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
	                 &isr, &regs->pr, &regs->cr_ifs, regs);

	if (ret.status) {
		PSCBX(v, fpswa_ret) = ret;
		printk("%s(%s): fp_emulate() returned %ld\n",
		       __FUNCTION__, fp_fault?"fault":"trap", ret.status);
	}

	return ret.status;
}

void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
            unsigned long iim, unsigned long itir, unsigned long arg5,
            unsigned long arg6, unsigned long arg7, unsigned long stack)
{
	struct pt_regs *regs = (struct pt_regs *) &stack;
	unsigned long code;
	static const char * const reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6",
		"Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10",
		"Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};

	printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
	       vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);

	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault; set the "ed" bit in the
		 * psr to cancel the lfetch.
		 */
		ia64_psr(regs)->ed = 1;
		printf("ia64_fault: handled lfetch.fault\n");
		return;
	}

	switch (vector) {
	case 0:
		printk("VHPT Translation.\n");
		break;

	case 4:
		printk("Alt DTLB.\n");
		break;

	case 6:
		printk("Instruction Key Miss.\n");
		break;

	case 7:
		printk("Data Key Miss.\n");
		break;

	case 8:
		printk("Dirty-bit.\n");
		break;

	case 20:
		printk("Page Not Found.\n");
		break;

	case 21:
		printk("Key Permission.\n");
		break;

	case 22:
		printk("Instruction Access Rights.\n");
		break;

	case 24: /* General Exception */
		code = (isr >> 4) & 0xf;
		printk("General Exception: %s%s.\n", reason[code],
		       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
		                      " (data access)") : "");
		if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
			       current->comm, current->pid,
			       regs->cr_iip + ia64_psr(regs)->ri,
			       regs->pr);
# endif
			printf("ia64_fault: returning on hazard\n");
			return;
		}
		break;

	case 25:
		printk("Disabled FP-Register.\n");
		break;

	case 26:
		printk("NaT consumption.\n");
		break;

	case 29:
		printk("Debug.\n");
		break;

	case 30:
		printk("Unaligned Reference.\n");
		break;

	case 31:
		printk("Unsupported data reference.\n");
		break;

	case 32:
		printk("Floating-Point Fault.\n");
		break;

	case 33:
		printk("Floating-Point Trap.\n");
		break;

	case 34:
		printk("Lower Privilege Transfer Trap.\n");
		break;

	case 35:
		printk("Taken Branch Trap.\n");
		break;

	case 36:
		printk("Single Step Trap.\n");
		break;

	case 45:
		printk("IA-32 Exception.\n");
		break;

	case 46:
		printk("IA-32 Intercept.\n");
		break;

	case 47:
		printk("IA-32 Interrupt.\n");
		break;

	default:
		printk("Fault %lu\n", vector);
		break;
	}

	show_registers(regs);
	panic("Fault in Xen.\n");
}

unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S */
int first_break = 0;
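
/*
 * Demultiplex break faults on the break immediate: SSC breaks (simulator),
 * optional debugger breaks, the per-domain hypercall immediate, hyperprivops
 * issued with collection off; anything else is reflected to the guest.
 */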
void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	IA64FAULT vector;

	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
		do_ssc(vcpu_get_gr(current,36), regs);
	}
#ifdef CRASH_DEBUG
	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
		if (iim == 0)
			show_registers(regs);
		debugger_trap_fatal(0 /* don't care */, regs);
	}
#endif
	else if (iim == d->arch.breakimm) {
		/* by default, do not continue */
		v->arch.hypercall_continuation = 0;

		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
			if (!PSCBX(v, hypercall_continuation))
				vcpu_increment_iip(current);
		}
		else
			reflect_interruption(isr, regs, vector);
	}
	else if (!PSCB(v,interrupt_collection_enabled)) {
		if (ia64_hyperprivop(iim,regs))
			vcpu_increment_iip(current);
	}
	else {
		if (iim == 0)
			die_if_kernel("bug check", regs, iim);
		PSCB(v,iim) = iim;
		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
	}
}

void
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
	IA64FAULT vector;

	vector = priv_emulate(current,regs,isr);
	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
		// Note: if a path results in a vector to reflect that requires
		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
		reflect_interruption(isr,regs,vector);
	}
}
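
/*
 * Translate a hardware interruption vector number into the corresponding
 * guest IVT offset and reflect it.  Some data faults are first given a
 * chance to be resolved by lazy cover.
 */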
void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
	struct vcpu *v = current;
	unsigned long check_lazy_cover = 0;
	unsigned long psr = regs->cr_ipsr;

	/* The following faults shouldn't be seen from Xen itself */
	BUG_ON (!(psr & IA64_PSR_CPL));

	switch(vector) {
	case 8:
		vector = IA64_DIRTY_BIT_VECTOR; break;
	case 9:
		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
	case 10:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
	case 20:
		check_lazy_cover = 1;
		vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
	case 22:
		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
	case 23:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
	case 25:
		vector = IA64_DISABLED_FPREG_VECTOR;
		break;
	case 26:
		if (((isr >> 4L) & 0xfL) == 1) {
			/* Fault is due to a register NaT consumption fault. */
			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
			printf("ia64_handle_reflection: handling regNaT fault\n");
			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
		}
#if 1
		// pass null pointer dereferences through with no error
		// but retain debug output for non-zero ifa
		if (!ifa) {
			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
		}
#endif
#ifdef CONFIG_PRIVIFY
		/* Some privified operations are coded using reg+64 instead
		   of reg. */
		printf("*** NaT fault... attempting to handle as privop\n");
		printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
		       isr, ifa, regs->cr_iip, psr);
		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
		// certain NaT faults are higher priority than privop faults
		vector = priv_emulate(v,regs,isr);
		if (vector == IA64_NO_FAULT) {
			printf("*** Handled privop masquerading as NaT fault\n");
			return;
		}
#endif
		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
	case 27:
		//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
		PSCB(current,iim) = iim;
		vector = IA64_SPECULATION_VECTOR; break;
	case 30:
		// FIXME: Should we handle unaligned refs in Xen??
		vector = IA64_UNALIGNED_REF_VECTOR; break;
	case 32:
		if (!(handle_fpu_swa(1, regs, isr))) {
			vcpu_increment_iip(v);
			return;
		}
		printf("ia64_handle_reflection: handling FP fault\n");
		vector = IA64_FP_FAULT_VECTOR; break;
	case 33:
		if (!(handle_fpu_swa(0, regs, isr))) return;
		printf("ia64_handle_reflection: handling FP trap\n");
		vector = IA64_FP_TRAP_VECTOR; break;
	case 34:
		printf("ia64_handle_reflection: handling lowerpriv trap\n");
		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
	case 35:
		printf("ia64_handle_reflection: handling taken branch trap\n");
		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
	case 36:
		printf("ia64_handle_reflection: handling single step trap\n");
		vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;

	default:
		printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
		while(vector);	// deliberately hang on an unhandled vector
		return;
	}
	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs))
		return;
	PSCB(current,ifa) = ifa;
	PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
	reflect_interruption(isr,regs,vector);
}
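
/*
 * Dirty-bit fault taken while shadow (dirty-logging) mode is active: mark
 * the page in the dirty bitmap, then either set the dirty bit in the VHPT
 * entry (when the guest's virtual D bit is already set) or reflect the
 * fault to the guest.
 */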
void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct domain *d = current->domain;
	unsigned long gpfn;
	unsigned long pte = 0;
	struct vhpt_lf_entry *vlfe;

	/* There are 2 jobs to do:
	   - marking the page as dirty (the metaphysical address must be
	     extracted to do that).
	   - reflecting or not the fault (the virtual Dirty bit must be
	     extracted to decide).
	   Unfortunately, this information is not immediately available!  */

	/* Extract the metaphysical address.
	   Try to get it from the VHPT and the M2P, as we need the flags.  */
	vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
	pte = vlfe->page_flags;
	if (vlfe->ti_tag == ia64_ttag(ifa)) {
		/* The VHPT entry is valid.  */
		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
		BUG_ON(gpfn == INVALID_M2P_ENTRY);
	}
	else {
		unsigned long itir, iha;
		IA64FAULT fault;

		/* The VHPT entry is not valid.  */
		vlfe = NULL;

		/* FIXME: give a chance to tpa, as the TC was valid.  */

		fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

		/* Try again!  */
		if (fault != IA64_NO_FAULT) {
			/* This will trigger a dtlb miss.  */
			ia64_ptcl(ifa, PAGE_SHIFT << 2);
			return;
		}
		gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
		if (pte & _PAGE_D)
			pte |= _PAGE_VIRT_D;
	}

	/* Set the dirty bit in the bitmap.  */
	shadow_mark_page_dirty (d, gpfn);

	/* Update the local TC/VHPT and decide whether or not the fault
	   should be reflected.
	   SMP note: we almost ignore the other processors.  The shadow_bitmap
	   has been atomically updated.  If the dirty fault happens on another
	   processor, it will do its job.  */

	if (pte != 0) {
		/* We know how to handle the fault.  */

		if (pte & _PAGE_VIRT_D) {
			/* Rewrite the VHPT entry.
			   There is no race here because only the
			   cpu VHPT owner can write page_flags.  */
			if (vlfe)
				vlfe->page_flags = pte | _PAGE_D;

			/* Purge the TC locally.
			   It will be reloaded from the VHPT iff the
			   VHPT entry is still valid.  */
			ia64_ptcl(ifa, PAGE_SHIFT << 2);

			atomic64_inc(&d->arch.shadow_fault_count);
		}
		else {
			/* Reflect.
			   In this case there is no need to purge.  */
			ia64_handle_reflection(ifa, regs, isr, 0, 8);
		}
	}
	else {
		/* We don't know whether or not the fault must be
		   reflected.  The VHPT entry is not valid.  */
		/* FIXME: in metaphysical mode, we could do an ITC now.  */
		ia64_ptcl(ifa, PAGE_SHIFT << 2);
	}
}
737 }