ia64/xen-unstable
xen/arch/ia64/xen/faults.c @ 10704:c8bc76d877e0

[IA64] Fix the code-fetch method when an FP fault occurs on the VTi side

Use __vmx_get_domain_bundle() to fetch the faulting instruction bundle
when an FP fault occurs in a VTi domain.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
author awilliam@xenbuild.aw
date Mon Jul 24 13:48:12 2006 -0600 (2006-07-24)
parents bdc0258e162a
children 86e5d8458c08
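The shape of the fix, as it lands in handle_fpu_swa() below: the bundle at
the faulting IP is fetched through the VMX-aware helper for VTi domains
instead of unconditionally using the paravirtualized path.

    if (VMX_DOMAIN(current))
        bundle = __vmx_get_domain_bundle(fault_ip);
    else
        bundle = __get_domain_bundle(fault_ip);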
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/asm-xsi-offsets.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations be? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;

// should never panic domain... if it does, stack may have been overrun
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
        panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
    }
    vector &= ~0xf;
    if (vector != IA64_DATA_TLB_VECTOR &&
        vector != IA64_ALT_DATA_TLB_VECTOR &&
        vector != IA64_VHPT_TRANS_VECTOR) {
        panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                     vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
    }
}
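/*
 * Reflect a fault into the guest: save the interruption state into the
 * shared PSCB, switch to register bank 0, point cr.iip at the guest's
 * IVT entry for `vector', and mask event delivery until the guest
 * re-enables interruption collection.
 */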
void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!PSCB(v,interrupt_collection_enabled))
        check_bad_nested_interruption(isr,regs,vector);
    PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    PSCB(v,isr) = isr;
    PSCB(v,iip) = regs->cr_iip;
    PSCB(v,ifs) = 0;
    PSCB(v,incomplete_regframe) = 0;

    regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

    v->vcpu_info->evtchn_upcall_mask = 1;
    PSCB(v,interrupt_collection_enabled) = 0;

    inc_slow_reflect_count(vector);
}

static unsigned long pending_false_positive = 0;

void reflect_extint(struct pt_regs *regs)
{
    unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    struct vcpu *v = current;
    static int first_extint = 1;

    if (first_extint) {
        printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
        first_extint = 0;
    }
    if (vcpu_timer_pending_early(v))
        printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
    PSCB(current,itir) = 0;
    reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
}
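/*
 * Like reflect_interruption(), but delivers a pending event channel
 * upcall through the guest's registered event callback entry point
 * rather than through a fixed IVT vector.
 */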
void reflect_event(struct pt_regs *regs)
{
    unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    struct vcpu *v = current;

    /* Sanity check */
    if (is_idle_vcpu(v) || !user_mode(regs)) {
        //printk("WARN: invocation to reflect_event in nested xen\n");
        return;
    }

    if (!event_pending(v))
        return;

    if (!PSCB(v,interrupt_collection_enabled))
        printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
               regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
    PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    PSCB(v,isr) = isr;
    PSCB(v,iip) = regs->cr_iip;
    PSCB(v,ifs) = 0;
    PSCB(v,incomplete_regframe) = 0;

    regs->cr_iip = v->arch.event_callback_ip;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;

    v->vcpu_info->evtchn_upcall_mask = 1;
    PSCB(v,interrupt_collection_enabled) = 0;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d) && user_mode(regs)) {
        if (vcpu_deliverable_interrupts(v))
            reflect_extint(regs);
        else if (PSCB(v,pending_interruption))
            ++pending_false_positive;
    }
}
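/*
 * Lazy cover: if the guest faulted with interruption collection
 * disabled, save cr.ifs in the PSCB, flag the register frame as
 * incomplete, and retry the instruction with cr.ifs cleared.
 */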
static int
handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
    if (!PSCB(v,interrupt_collection_enabled)) {
        PSCB(v,ifs) = regs->cr_ifs;
        PSCB(v,incomplete_regframe) = 1;
        regs->cr_ifs = 0;
        lazy_cover_count++;
        return(1);  // retry same instruction with cr.ifs off
    }
    return(0);
}
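/*
 * TLB miss / page fault path. Translate the address through the vcpu's
 * TRs/TLB/VHPT; on success insert the machine translation, retrying if
 * a concurrent purge raced with the insert. Faults that cannot be
 * resolved here are reflected to the guest; faults inside Xen itself
 * must be covered by the exception table.
 */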
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    unsigned long iip = regs->cr_iip, iha;
    // FIXME should validate address here
    unsigned long pteval;
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    IA64FAULT fault;
    int is_ptc_l_needed = 0;
    u64 logps;

    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault, set the "ed"
         * bit in the psr to ensure forward progress. (Target register will get a
         * NaT for ld.s, lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }

 again:
    fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
    if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
        struct p2m_entry entry;
        pteval = translate_domain_pte(pteval, address, itir, &logps, &entry);
        vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
        if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
            p2m_entry_retry(&entry)) {
            /* dtlb has been purged in-between. This dtlb was
               matching. Undo the work. */
            vcpu_flush_tlb_vhpt_range(address, logps);
            // The stale entry which we inserted above may remain
            // in the TLB cache. We don't purge it now, hoping the
            // next itc purges it.
            is_ptc_l_needed = 1;
            goto again;
        }
        return;
    }

    if (is_ptc_l_needed)
        vcpu_ptc_l(current, address, logps);
    if (!user_mode (regs)) {
        /* The fault occurs inside Xen. */
        if (!ia64_done_with_exception(regs)) {
            // should never happen. If it does, region 0 addr may
            // indicate a bad xen pointer
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                   iip, address);
            panic_domain(regs,"*** xen_handle_domain_access: exception table"
                         " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                         iip, address);
        }
        return;
    }

    if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
        return;

    if (!PSCB(current,interrupt_collection_enabled)) {
        check_bad_nested_interruption(isr,regs,fault);
        //printf("Delivering NESTED DATA TLB fault\n");
        fault = IA64_DATA_NESTED_TLB_VECTOR;
        regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        // NOTE: nested trap must NOT pass PSCB address
        //regs->r31 = (unsigned long) &PSCB(current);
        inc_slow_reflect_count(fault);
        return;
    }

    PSCB(current,itir) = itir;
    PSCB(current,iha) = iha;
    PSCB(current,ifa) = address;
    reflect_interruption(isr, regs, fault);
}
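/*
 * FPSWA (IA-64 Floating-Point Software Assistance) is an EFI-provided
 * driver that emulates floating-point operations for which the hardware
 * raises an assist fault or trap. The boot loader passes its interface
 * pointer in ia64_boot_param.
 */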
fpswa_interface_t *fpswa_interface = 0;

void trap_init (void)
{
    if (ia64_boot_param->fpswa)
        /* FPSWA fixup: make the interface pointer a virtual address: */
        fpswa_interface = __va(ia64_boot_param->fpswa);
    else
        printk("No FPSWA supported.\n");
}
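/*
 * Thin wrapper around the FPSWA entry point: build an fp_state
 * describing the live FP registers (only f6-f11 live in pt_regs) and
 * hand the trapping bundle to the firmware emulator.
 */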
static fpswa_ret_t
fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
            unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
            unsigned long *ifs, struct pt_regs *regs)
{
    fp_state_t fp_state;
    fpswa_ret_t ret;

    if (!fpswa_interface)
        return ((fpswa_ret_t) {-1, 0, 0, 0});

    memset(&fp_state, 0, sizeof(fp_state_t));

    /*
     * compute fp_state. only FP registers f6 - f11 are used by the
     * kernel, so set those bits in the mask and set the low volatile
     * pointer to point to these registers.
     */
    fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

    fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
    /*
     * unsigned long (*EFI_FPSWA) (
     *      unsigned long    trap_type,
     *      void             *Bundle,
     *      unsigned long    *pipsr,
     *      unsigned long    *pfsr,
     *      unsigned long    *pisr,
     *      unsigned long    *ppreds,
     *      unsigned long    *pifs,
     *      void             *fp_state);
     */
    ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
                                    ipsr, fpsr, isr, pr, ifs, &fp_state);

    return ret;
}
/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
    struct vcpu *v = current;
    IA64_BUNDLE bundle;
    unsigned long fault_ip;
    fpswa_ret_t ret;

    fault_ip = regs->cr_iip;
    /*
     * When the FP trap occurs, the trapping instruction is completed.
     * If ipsr.ri == 0, the trapping instruction is in the previous bundle.
     */
    if (!fp_fault && (ia64_psr(regs)->ri == 0))
        fault_ip -= 16;
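    /*
     * This changeset's fix: a VTi (VMX) domain runs with its own guest
     * mappings, so its bundle must be fetched through
     * __vmx_get_domain_bundle() rather than the paravirtualized
     * __get_domain_bundle() path.
     */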
    if (VMX_DOMAIN(current))
        bundle = __vmx_get_domain_bundle(fault_ip);
    else
        bundle = __get_domain_bundle(fault_ip);

    if (!bundle.i64[0] && !bundle.i64[1]) {
        printk("%s: floating-point bundle at 0x%lx not mapped\n",
               __FUNCTION__, fault_ip);
        return -1;
    }

    ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                     &isr, &regs->pr, &regs->cr_ifs, regs);

    if (ret.status) {
        PSCBX(v, fpswa_ret) = ret;
        printk("%s(%s): fp_emulate() returned %ld\n",
               __FUNCTION__, fp_fault?"fault":"trap", ret.status);
    }

    return ret.status;
}
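/*
 * Catch-all handler for faults taken by Xen itself: print a diagnosis
 * of the vector, dump the registers, and panic, except for the few
 * recoverable cases below (lfetch.fault and hazards).
 */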
void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
            unsigned long iim, unsigned long itir, unsigned long arg5,
            unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code;
    static const char * const reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6",
        "Unknown fault 7", "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10",
        "Unknown fault 11", "Unknown fault 12",
        "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
    };

    printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
           vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
         * the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
    case 0:
        printk("VHPT Translation.\n");
        break;

    case 4:
        printk("Alt DTLB.\n");
        break;

    case 6:
        printk("Instruction Key Miss.\n");
        break;

    case 7:
        printk("Data Key Miss.\n");
        break;

    case 8:
        printk("Dirty-bit.\n");
        break;

    case 20:
        printk("Page Not Found.\n");
        break;

    case 21:
        printk("Key Permission.\n");
        break;

    case 22:
        printk("Instruction Access Rights.\n");
        break;

    case 24: /* General Exception */
        code = (isr >> 4) & 0xf;
        printk("General Exception: %s%s.\n", reason[code],
               (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                              " (data access)") : "");
        if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
            printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                   current->comm, current->pid,
                   regs->cr_iip + ia64_psr(regs)->ri,
                   regs->pr);
# endif
            printf("ia64_fault: returning on hazard\n");
            return;
        }
        break;

    case 25:
        printk("Disabled FP-Register.\n");
        break;

    case 26:
        printk("NaT consumption.\n");
        break;

    case 29:
        printk("Debug.\n");
        break;

    case 30:
        printk("Unaligned Reference.\n");
        break;

    case 31:
        printk("Unsupported data reference.\n");
        break;

    case 32:
        printk("Floating-Point Fault.\n");
        break;

    case 33:
        printk("Floating-Point Trap.\n");
        break;

    case 34:
        printk("Lower Privilege Transfer Trap.\n");
        break;

    case 35:
        printk("Taken Branch Trap.\n");
        break;

    case 36:
        printk("Single Step Trap.\n");
        break;

    case 45:
        printk("IA-32 Exception.\n");
        break;

    case 46:
        printk("IA-32 Intercept.\n");
        break;

    case 47:
        printk("IA-32 Interrupt.\n");
        break;

    default:
        printk("Fault %lu\n", vector);
        break;
    }

    show_registers(regs);
    panic("Fault in Xen.\n");
}
unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S */
int first_break = 0;
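/*
 * break-instruction dispatcher: SSC breaks from the simulator, optional
 * debugger breaks, the per-domain hypercall immediate (breakimm),
 * hyperprivops issued with psr.ic off, and everything else reflected to
 * the guest as IA64_BREAK_VECTOR.
 */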
void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    IA64FAULT vector;

    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        do_ssc(vcpu_get_gr(current,36), regs);
    }
#ifdef CRASH_DEBUG
    else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    }
#endif
    else if (iim == d->arch.breakimm) {
        /* by default, do not continue */
        v->arch.hypercall_continuation = 0;

        if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
            if (!PSCBX(v, hypercall_continuation))
                vcpu_increment_iip(current);
        }
        else reflect_interruption(isr, regs, vector);
    }
    else if (!PSCB(v,interrupt_collection_enabled)) {
        if (ia64_hyperprivop(iim,regs))
            vcpu_increment_iip(current);
    }
    else {
        if (iim == 0)
            die_if_kernel("bug check", regs, iim);
        PSCB(v,iim) = iim;
        reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
    }
}
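/*
 * Privileged-operation fault: try to emulate the privileged instruction
 * on the guest's behalf; reflect the fault if emulation declines.
 */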
void
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;

    vector = priv_emulate(current,regs,isr);
    if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        // Note: if a path results in a vector to reflect that requires
        // iha/itir (e.g. vcpu_force_data_miss), they must be set there
        reflect_interruption(isr,regs,vector);
    }
}
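/*
 * Map a hardware fault vector number onto the guest IVT offset to
 * reflect, handling the special cases (lazy cover, NaT-as-privop,
 * FPSWA emulation) along the way.
 */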
void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct vcpu *v = current;
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;
    /* The following faults shouldn't be seen from Xen itself */
    BUG_ON (!(psr & IA64_PSR_CPL));
    switch(vector) {
    case 8:
        vector = IA64_DIRTY_BIT_VECTOR; break;
    case 9:
        vector = IA64_INST_ACCESS_BIT_VECTOR; break;
    case 10:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
    case 20:
        check_lazy_cover = 1;
        vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
    case 22:
        vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
    case 23:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
    case 25:
        vector = IA64_DISABLED_FPREG_VECTOR;
        break;
    case 26:
        if (((isr >> 4L) & 0xfL) == 1) {
            /* Fault is due to a register NaT consumption fault. */
            //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
            printf("ia64_handle_reflection: handling regNaT fault\n");
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        }
#if 1
        // pass null pointer dereferences through with no error
        // but retain debug output for non-zero ifa
        if (!ifa) {
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        }
#endif
#ifdef CONFIG_PRIVIFY
        /* Some privified operations are coded using reg+64 instead
           of reg. */
        printf("*** NaT fault... attempting to handle as privop\n");
        printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
               isr, ifa, regs->cr_iip, psr);
        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
        // certain NaT faults are higher priority than privop faults
        vector = priv_emulate(v,regs,isr);
        if (vector == IA64_NO_FAULT) {
            printf("*** Handled privop masquerading as NaT fault\n");
            return;
        }
#endif
        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
    case 27:
        //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
        PSCB(current,iim) = iim;
        vector = IA64_SPECULATION_VECTOR; break;
    case 30:
        // FIXME: Should we handle unaligned refs in Xen??
        vector = IA64_UNALIGNED_REF_VECTOR; break;
    case 32:
        if (!(handle_fpu_swa(1, regs, isr))) {
            vcpu_increment_iip(v);
            return;
        }
        printf("ia64_handle_reflection: handling FP fault\n");
        vector = IA64_FP_FAULT_VECTOR; break;
    case 33:
        if (!(handle_fpu_swa(0, regs, isr))) return;
        printf("ia64_handle_reflection: handling FP trap\n");
        vector = IA64_FP_TRAP_VECTOR; break;
    case 34:
        printf("ia64_handle_reflection: handling lowerpriv trap\n");
        vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
    case 35:
        printf("ia64_handle_reflection: handling taken branch trap\n");
        vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
    case 36:
        printf("ia64_handle_reflection: handling single step trap\n");
        vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;
    default:
        printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
        while(vector);  // spin deliberately so the unhandled vector is noticed
        return;
    }
    if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
    PSCB(current,ifa) = ifa;
    PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
    reflect_interruption(isr,regs,vector);
}