ia64/xen-unstable

xen/arch/ia64/xen/faults.c @ 10688:bdc0258e162a

[IA64] fix vcpu_itr_i(), vcpu_itr_d() and vcpu_flush_tlb_vhpt_range() callers

- vcpu_itr_i() and vcpu_itr_d() must purge any vTLB entry that overlaps
  the newly inserted entry (the pattern is sketched below).
- Some address arguments passed to vcpu_flush_tlb_vhpt_range() were wrong.
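
As a rough illustration of the first point (hedged: itr_insert_sketch()
and its body are made up for this note; only vcpu_flush_tlb_vhpt_range()
is named by the change itself, whose real code lives in
xen/arch/ia64/xen/vcpu.c), the purge-before-insert pattern looks like:

    /* Hypothetical sketch, not the committed code: before inserting a
     * translation register entry, purge any vTLB/VHPT entries that
     * overlap the range the new entry will cover. */
    static void itr_insert_sketch(struct vcpu *vcpu, u64 ifa, u64 pte, u64 itir)
    {
            u64 ps = (itir >> 2) & 0x3f;            /* ITIR.ps: log2 of page size */
            u64 vadr = ifa & ~((1UL << ps) - 1);    /* align to that page size */

            /* Purge overlapping entries first, passing the page-aligned
             * address (the address arguments fixed by this changeset). */
            vcpu_flush_tlb_vhpt_range(vadr, ps);

            /* ...then insert the new entry covering [vadr, vadr + 2^ps). */
    }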

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Mon Jul 10 14:11:44 2006 -0600 (2006-07-10)
parents 550786d7d352
children c8bc76d877e0

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *      Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/asm-xsi-offsets.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;

// should never panic domain... if it does, stack may have been overrun
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
        struct vcpu *v = current;

        if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
                panic_domain(regs, "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
                panic_domain(regs, "psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa), isr, PSCB(v, iip));
        }
}
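
/*
 * Deliver an interruption to the domain: save the interruption state
 * in the shared PSCB, switch to register bank 0, point the guest at
 * the IVT entry for 'vector', and mask further delivery/collection.
 */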
void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
        struct vcpu *v = current;

        if (!PSCB(v, interrupt_collection_enabled))
                check_bad_nested_interruption(isr, regs, vector);
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        vcpu_bsw0(v);
        PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;
        PSCB(v, incomplete_regframe) = 0;

        regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        inc_slow_reflect_count(vector);
}

static unsigned long pending_false_positive = 0;
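
/* Reflect an external interrupt to the domain via its extint vector. */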
void reflect_extint(struct pt_regs *regs)
{
        unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
        struct vcpu *v = current;
        static int first_extint = 1;

        if (first_extint) {
                printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
                first_extint = 0;
        }
        if (vcpu_timer_pending_early(v))
                printf("*#*#*#* about to deliver early timer to domain %d!!!\n", v->domain->domain_id);
        PSCB(current, itir) = 0;
        reflect_interruption(isr, regs, IA64_EXTINT_VECTOR);
}
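
/*
 * Deliver a pending event-channel upcall by vectoring the domain to
 * its registered event callback; mirrors reflect_interruption().
 */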
void reflect_event(struct pt_regs *regs)
{
        unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
        struct vcpu *v = current;

        /* Sanity check */
        if (is_idle_vcpu(v) || !user_mode(regs)) {
                //printk("WARN: invocation to reflect_event in nested xen\n");
                return;
        }

        if (!event_pending(v))
                return;

        if (!PSCB(v, interrupt_collection_enabled))
                printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
                       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        vcpu_bsw0(v);
        PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;
        PSCB(v, incomplete_regframe) = 0;

        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        // FIXME: Will this work properly if doing an RFI???
        if (!is_idle_domain(d) && user_mode(regs)) {
                if (vcpu_deliverable_interrupts(v))
                        reflect_extint(regs);
                else if (PSCB(v, pending_interruption))
                        ++pending_false_positive;
        }
}
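
/*
 * If interruption collection is off, the guest has not yet done a
 * "cover": stash cr.ifs in the PSCB, mark the register frame
 * incomplete, and retry the faulting instruction with cr.ifs clear.
 */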
static int
handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
        if (!PSCB(v, interrupt_collection_enabled)) {
                PSCB(v, ifs) = regs->cr_ifs;
                PSCB(v, incomplete_regframe) = 1;
                regs->cr_ifs = 0;
                lazy_cover_count++;
                return 1;  // retry same instruction with cr.ifs off
        }
        return 0;
}
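
/*
 * TLB miss/page fault handler: translate the faulting address through
 * the vTLB and insert the result, or reflect the fault to the domain
 * (or panic if it is an unhandled fault inside Xen itself).
 */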
void ia64_do_page_fault(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
        unsigned long iip = regs->cr_iip, iha;
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
        int is_ptc_l_needed = 0;
        u64 logps;

        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

 again:
        fault = vcpu_translate(current, address, is_data, &pteval, &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                struct p2m_entry entry;
                pteval = translate_domain_pte(pteval, address, itir, &logps, &entry);
                vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, logps);
                if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
                    p2m_entry_retry(&entry)) {
                        /* dtlb has been purged in-between.  This dtlb was
                           matching.  Undo the work.  */
                        vcpu_flush_tlb_vhpt_range(address, logps);

                        // The stale entry which we inserted above
                        // may remain in the TLB cache.
                        // We don't purge it now, hoping the next itc purges it.
                        is_ptc_l_needed = 1;
                        goto again;
                }
                return;
        }

        if (is_ptc_l_needed)
                vcpu_ptc_l(current, address, logps);
        if (!user_mode(regs)) {
                /* The fault occurs inside Xen.  */
                if (!ia64_done_with_exception(regs)) {
                        // should never happen.  If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
                               " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                               iip, address);
                        panic_domain(regs, "*** xen_handle_domain_access: exception table"
                                     " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                                     iip, address);
                }
                return;
        }

        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
                return;

        if (!PSCB(current, interrupt_collection_enabled)) {
                check_bad_nested_interruption(isr, regs, fault);
                //printf("Delivering NESTED DATA TLB fault\n");
                fault = IA64_DATA_NESTED_TLB_VECTOR;
                regs->cr_iip = ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
                regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
                // NOTE: nested trap must NOT pass PSCB address
                //regs->r31 = (unsigned long) &PSCB(current);
                inc_slow_reflect_count(fault);
                return;
        }

        PSCB(current, itir) = itir;
        PSCB(current, iha) = iha;
        PSCB(current, ifa) = address;
        reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void trap_init(void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a virtual address: */
                fpswa_interface = __va(ia64_boot_param->fpswa);
        else
                printk("No FPSWA supported.\n");
}
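
/*
 * Hand a faulting FP bundle to the FPSWA (floating-point software
 * assist) EFI driver for emulation.
 */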
static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return ((fpswa_ret_t) {-1, 0, 0, 0});

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *)&regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long trap_type,
         *      void *Bundle,
         *      unsigned long *pipsr,
         *      unsigned long *pfsr,
         *      unsigned long *pisr,
         *      unsigned long *ppreds,
         *      unsigned long *pifs,
         *      void *fp_state);
         */
        ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
                                        ipsr, fpsr, isr, pr, ifs, &fp_state);

        return ret;
}

/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        struct vcpu *v = current;
        IA64_BUNDLE bundle;
        IA64_BUNDLE __get_domain_bundle(UINT64);
        unsigned long fault_ip;
        fpswa_ret_t ret;

        fault_ip = regs->cr_iip;
        /*
         * When the FP trap occurs, the trapping instruction has completed.
         * If ipsr.ri == 0, the trapping instruction is in the previous bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;
        bundle = __get_domain_bundle(fault_ip);
        if (!bundle.i64[0] && !bundle.i64[1]) {
                printk("%s: floating-point bundle at 0x%lx not mapped\n",
                       __FUNCTION__, fault_ip);
                return -1;
        }

        ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                         &isr, &regs->pr, &regs->cr_ifs, regs);

        if (ret.status) {
                PSCBX(v, fpswa_ret) = ret;
                printk("%s(%s): fp_emulate() returned %ld\n",
                       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
        }

        return ret.status;
}
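
/*
 * Handler of last resort for faults taken in Xen itself: print a
 * diagnosis of the vector, dump registers, and panic (except for the
 * lfetch.fault and hazard cases handled inline).
 */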
void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
        struct pt_regs *regs = (struct pt_regs *)&stack;
        unsigned long code;
        static const char * const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6",
                "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10",
                "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
               vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);

        if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
                 * the lfetch.
                 */
                ia64_psr(regs)->ed = 1;
                printf("ia64_fault: handled lfetch.fault\n");
                return;
        }

        switch (vector) {
        case 0:
                printk("VHPT Translation.\n");
                break;

        case 4:
                printk("Alt DTLB.\n");
                break;

        case 6:
                printk("Instruction Key Miss.\n");
                break;

        case 7:
                printk("Data Key Miss.\n");
                break;

        case 8:
                printk("Dirty-bit.\n");
                break;

        case 20:
                printk("Page Not Found.\n");
                break;

        case 21:
                printk("Key Permission.\n");
                break;

        case 22:
                printk("Instruction Access Rights.\n");
                break;

        case 24: /* General Exception */
                code = (isr >> 4) & 0xf;
                printk("General Exception: %s%s.\n", reason[code],
                       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                      " (data access)") : "");
                if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                               current->comm, current->pid,
                               regs->cr_iip + ia64_psr(regs)->ri,
                               regs->pr);
# endif
                        printf("ia64_fault: returning on hazard\n");
                        return;
                }
                break;

        case 25:
                printk("Disabled FP-Register.\n");
                break;

        case 26:
                printk("NaT consumption.\n");
                break;

        case 29:
                printk("Debug.\n");
                break;

        case 30:
                printk("Unaligned Reference.\n");
                break;

        case 31:
                printk("Unsupported data reference.\n");
                break;

        case 32:
                printk("Floating-Point Fault.\n");
                break;

        case 33:
                printk("Floating-Point Trap.\n");
                break;

        case 34:
                printk("Lower Privilege Transfer Trap.\n");
                break;

        case 35:
                printk("Taken Branch Trap.\n");
                break;

        case 36:
                printk("Single Step Trap.\n");
                break;

        case 45:
                printk("IA-32 Exception.\n");
                break;

        case 46:
                printk("IA-32 Intercept.\n");
                break;

        case 47:
                printk("IA-32 Interrupt.\n");
                break;

        default:
                printk("Fault %lu\n", vector);
                break;
        }

        show_registers(regs);
        panic("Fault in Xen.\n");
}

unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S */
int first_break = 0;
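
/*
 * break-instruction handler: dispatches SSC requests, debugger breaks,
 * hypercalls (iim == d->arch.breakimm), hyperprivops, and guest break
 * faults reflected through IA64_BREAK_VECTOR.
 */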
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        IA64FAULT vector;

        if (iim == 0x80001 || iim == 0x80002) {  // FIXME: don't hardcode constant
                do_ssc(vcpu_get_gr(current, 36), regs);
        }
#ifdef CRASH_DEBUG
        else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
                if (iim == 0)
                        show_registers(regs);
                debugger_trap_fatal(0 /* don't care */, regs);
        }
#endif
        else if (iim == d->arch.breakimm) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;

                if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
                        if (!PSCBX(v, hypercall_continuation))
                                vcpu_increment_iip(current);
                }
                else
                        reflect_interruption(isr, regs, vector);
        }
        else if (!PSCB(v, interrupt_collection_enabled)) {
                if (ia64_hyperprivop(iim, regs))
                        vcpu_increment_iip(current);
        }
        else {
                if (iim == 0)
                        die_if_kernel("bug check", regs, iim);
                PSCB(v, iim) = iim;
                reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
        }
}
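
/*
 * Privileged-operation fault: try to emulate the instruction and
 * reflect the fault to the domain if emulation does not handle it.
 */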
void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
        IA64FAULT vector;

        vector = priv_emulate(current, regs, isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector to reflect that requires
                // iha/itir (e.g. vcpu_force_data_miss), they must be set there
                reflect_interruption(isr, regs, vector);
        }
}
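
/*
 * Map a hardware fault/trap vector onto the corresponding guest IVT
 * vector and reflect it, handling FPSWA, NaT, and lazy-cover special
 * cases along the way.
 */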
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
        struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;

        /* The following faults shouldn't be seen from Xen itself */
        BUG_ON(!(psr & IA64_PSR_CPL));

        switch (vector) {
        case 8:
                vector = IA64_DIRTY_BIT_VECTOR; break;
        case 9:
                vector = IA64_INST_ACCESS_BIT_VECTOR; break;
        case 10:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
        case 20:
                check_lazy_cover = 1;
                vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
        case 22:
                vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
        case 23:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
        case 25:
                vector = IA64_DISABLED_FPREG_VECTOR;
                break;
        case 26:
                if (((isr >> 4L) & 0xfL) == 1) {
                        /* Fault is due to a register NaT consumption fault. */
                        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
                        printf("ia64_handle_reflection: handling regNaT fault\n");
                        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
                }
#if 1
                // pass null pointer dereferences through with no error
                // but retain debug output for non-zero ifa
                if (!ifa) {
                        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
                }
#endif
#ifdef CONFIG_PRIVIFY
                /* Some privified operations are coded using reg+64 instead
                   of reg.  */
                printf("*** NaT fault... attempting to handle as privop\n");
                printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v, regs, isr);
                if (vector == IA64_NO_FAULT) {
                        printf("*** Handled privop masquerading as NaT fault\n");
                        return;
                }
#endif
                vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        case 27:
                //printf("*** Handled speculation vector, itc=%lx!\n", ia64_get_itc());
                PSCB(current, iim) = iim;
                vector = IA64_SPECULATION_VECTOR; break;
        case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR; break;
        case 32:
                if (!(handle_fpu_swa(1, regs, isr))) {
                        vcpu_increment_iip(v);
                        return;
                }
                printf("ia64_handle_reflection: handling FP fault\n");
                vector = IA64_FP_FAULT_VECTOR; break;
        case 33:
                if (!(handle_fpu_swa(0, regs, isr)))
                        return;
                printf("ia64_handle_reflection: handling FP trap\n");
                vector = IA64_FP_TRAP_VECTOR; break;
        case 34:
                printf("ia64_handle_reflection: handling lowerpriv trap\n");
                vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
        case 35:
                printf("ia64_handle_reflection: handling taken branch trap\n");
                vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
        case 36:
                printf("ia64_handle_reflection: handling single step trap\n");
                vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;

        default:
                printf("ia64_handle_reflection: unhandled vector=0x%lx\n", vector);
                while (vector)
                        /* spin so the unhandled vector gets noticed */ ;
                return;
        }
        if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs))
                return;
        PSCB(current, ifa) = ifa;
        PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
        reflect_interruption(isr, regs, vector);
}