ia64/xen-unstable

view xen/arch/ia64/xen/process.c @ 10157:faae893d428e

[IA64] support FPSWA emulation

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Thu May 25 15:47:33 2006 -0600 (2006-05-25)
parents 40959bc0a269
children 003157eafd66
line source
2 /*
3 * Miscellaneous process/domain related routines
4 *
5 * Copyright (C) 2004 Hewlett-Packard Co.
6 * Dan Magenheimer (dan.magenheimer@hp.com)
7 *
8 */
10 #include <xen/config.h>
11 #include <xen/lib.h>
12 #include <xen/errno.h>
13 #include <xen/sched.h>
14 #include <xen/smp.h>
15 #include <asm/ptrace.h>
16 #include <xen/delay.h>
18 #include <asm/sal.h> /* FOR struct ia64_sal_retval */
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include <asm/processor.h>
23 #include <asm/desc.h>
24 //#include <asm/ldt.h>
25 #include <xen/irq.h>
26 #include <xen/event.h>
27 #include <asm/regionreg.h>
28 #include <asm/privop.h>
29 #include <asm/vcpu.h>
30 #include <asm/ia64_int.h>
31 #include <asm/dom_fw.h>
32 #include <asm/vhpt.h>
33 #include "hpsim_ssc.h"
34 #include <xen/multicall.h>
35 #include <asm/debugger.h>
36 #include <asm/fpswa.h>
/* Routines defined in other translation units but not yet exported
 * through a proper header. */
38 extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
39 /* FIXME: where should these declarations live? */
40 extern void panic_domain(struct pt_regs *, const char *, ...);
41 extern long platform_is_hp_ski(void);
42 extern int ia64_hyperprivop(unsigned long, REGS *);
43 extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
44 extern void vmx_do_launch(struct vcpu *);
45 extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
47 #define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
48 // note IA64_PSR_PK removed from following, why is this necessary?
/* PSR bits forced ON in regs->cr_ipsr when an interruption is reflected
 * to the guest (see reflect_interruption()/reflect_event()). */
49 #define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
50 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
51 IA64_PSR_IT | IA64_PSR_BN)
/* PSR bits forced OFF on the same delivery path.  Clearing IA64_PSR_CPL
 * while setting IA64_PSR_CPL1 above drops the guest to a fixed privilege
 * level on entry to its handler. */
53 #define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
54 IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
55 IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
56 IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
57 IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
58 IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
60 #include <xen/sched-if.h>
/*
 * Tail of a context switch, running on the incoming vcpu's stack.
 * Releases the outgoing vcpu to the scheduler and then installs the
 * incoming vcpu's interruption/translation state: VMX domains take the
 * VT-i launch path, paravirtual domains get Xen's IVT plus PTA/VHPT,
 * region-register and kernel-register setup.
 */
62 void schedule_tail(struct vcpu *prev)
63 {
64 extern char ia64_ivt;
/* Tell the scheduler 'prev' is fully switched out. */
65 context_saved(prev);
67 if (VMX_DOMAIN(current)) {
68 vmx_do_launch(current);
69 } else {
/* Point cr.iva at Xen's interruption vector table. */
70 ia64_set_iva(&ia64_ivt);
/* Program the VHPT base/size and enable it.  NOTE(review): the
 * (1 << 8) term is assumed to be the PTA.vf (long-format) bit —
 * confirm against the architecture manual. */
71 ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
72 VHPT_ENABLED);
73 load_region_regs(current);
74 vcpu_load_kernel_regs(current);
75 }
76 }
78 void tdpfoo(void) { }
80 // given a domain virtual address, pte and pagesize, extract the metaphysical
81 // address, convert the pte for a physical address for (possibly different)
82 // Xen PAGE_SIZE and return modified pte. (NOTE: TLB insert should use
83 // PAGE_SIZE!)
/* Parameters: pteval = guest PTE, address = guest virtual address,
 * itir__ = raw ITIR (page size in bits [7:2]), *logps = out: log2 of the
 * page size actually used for the insert. */
84 u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
85 {
86 struct domain *d = current->domain;
87 ia64_itir_t itir = {.itir = itir__};
88 u64 mask, mpaddr, pteval2;
89 u64 arflags;
90 u64 arflags2;
92 pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
94 // FIXME address had better be pre-validated on insert
/* Combine the PTE's PPN (above the page size) with the low bits of the
 * faulting address to get the full metaphysical address. */
95 mask = ~itir_mask(itir.itir);
96 mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
97 (address & mask);
98 #ifdef CONFIG_XEN_IA64_DOM0_VP
/* Never insert a mapping larger than Xen's PAGE_SIZE. */
99 if (itir.ps > PAGE_SHIFT) {
100 itir.ps = PAGE_SHIFT;
101 }
102 #endif
103 *logps = itir.ps;
104 #ifndef CONFIG_XEN_IA64_DOM0_VP
105 if (d == dom0) {
106 if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
107 /*
108 printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
109 mpaddr, ia64_get_itc());
110 */
111 tdpfoo();
112 }
113 }
114 else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
115 /* Address beyond the limit. However the grant table is
116 also beyond the limit. Display a message if not in the
117 grant table. */
/* NOTE(review): this condition prints only when mpaddr IS inside the
 * grant-table window, which contradicts the comment above ("if not in
 * the grant table").  Either the comment or the condition is inverted —
 * verify against upstream before changing. */
118 if (mpaddr >= IA64_GRANT_TABLE_PADDR
119 && mpaddr < (IA64_GRANT_TABLE_PADDR
120 + (ORDER_GRANT_FRAMES << PAGE_SHIFT)))
121 printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
122 "vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
123 mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
124 address, pteval, itir.itir);
125 tdpfoo();
126 }
127 #endif
/* Translate metaphysical -> machine and splice the machine PPN back
 * into the guest's PTE attribute bits. */
128 pteval2 = lookup_domain_mpa(d,mpaddr);
129 arflags = pteval & _PAGE_AR_MASK;
130 arflags2 = pteval2 & _PAGE_AR_MASK;
/* If Xen only grants read access, downgrade the guest's AR to read. */
131 if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
132 #if 0
133 DPRINTK("%s:%d "
134 "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
135 "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
136 __func__, __LINE__,
137 pteval, arflags, address, itir__,
138 pteval2, arflags2, mpaddr);
139 #endif
140 pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
141 }
143 pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
144 pteval2 |= (pteval & _PAGE_ED);
145 pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
146 pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
147 return pteval2;
148 }
150 // given a current domain metaphysical address, return the physical address
/* Low PAGE_SHIFT bits of mpaddr pass through unchanged; only the frame
 * number is translated via lookup_domain_mpa(). */
151 unsigned long translate_domain_mpaddr(unsigned long mpaddr)
152 {
153 unsigned long pteval;
155 #ifndef CONFIG_XEN_IA64_DOM0_VP
/* dom0 without VP is identity-mapped into [dom0_start, dom0_start +
 * dom0_size); anything outside that is suspicious but non-fatal. */
156 if (current->domain == dom0) {
157 if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
158 printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
159 mpaddr);
160 tdpfoo();
161 }
162 }
163 #endif
164 pteval = lookup_domain_mpa(current->domain,mpaddr);
165 return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
166 }
/*
 * Per-vector reflection counters.  Interruption vectors are multiples of
 * 0x100, so index = vector >> 8; 0x80 entries covers vectors 0..0x7f00.
 */
unsigned long slow_reflect_count[0x80] = { 0 };
unsigned long fast_reflect_count[0x80] = { 0 };

/*
 * Bump the slow-path counter for interruption vector 'vec'.
 * Fixed: the argument is now parenthesized (so expressions bind
 * correctly around '>>') and the trailing semicolon is dropped so the
 * macro expands to a plain expression — the old form produced a double
 * ';;' at call sites and would misparse in an unbraced if/else.
 */
#define inc_slow_reflect_count(vec) (slow_reflect_count[(vec) >> 8]++)
173 void zero_reflect_counts(void)
174 {
175 int i;
176 for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
177 for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
178 }
180 int dump_reflect_counts(char *buf)
181 {
182 int i,j,cnt;
183 char *s = buf;
185 s += sprintf(s,"Slow reflections by vector:\n");
186 for (i = 0, j = 0; i < 0x80; i++) {
187 if ( (cnt = slow_reflect_count[i]) != 0 ) {
188 s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
189 if ((j++ & 3) == 3) s += sprintf(s,"\n");
190 }
191 }
192 if (j & 3) s += sprintf(s,"\n");
193 s += sprintf(s,"Fast reflections by vector:\n");
194 for (i = 0, j = 0; i < 0x80; i++) {
195 if ( (cnt = fast_reflect_count[i]) != 0 ) {
196 s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
197 if ((j++ & 3) == 3) s += sprintf(s,"\n");
198 }
199 }
200 if (j & 3) s += sprintf(s,"\n");
201 return s - buf;
202 }
204 // should never panic domain... if it does, stack may have been overrun
/* Sanity-check a fault taken while the guest had psr.ic off.  Only TLB
 * miss class vectors are legal in that window; anything else means the
 * guest's interruption state would be silently clobbered, so panic. */
205 void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
206 {
207 struct vcpu *v = current;
209 if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
210 panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
211 }
/* Mask the low bits before comparing against the vector constants —
 * callers may pass an IA64FAULT code with status bits in [3:0]. */
212 vector &= ~0xf;
213 if (vector != IA64_DATA_TLB_VECTOR &&
214 vector != IA64_ALT_DATA_TLB_VECTOR &&
215 vector != IA64_VHPT_TRANS_VECTOR) {
216 panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
217 vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
218 }
219 }
/*
 * Deliver an interruption to the current vcpu by emulating what the
 * hardware would do on a real fault: snapshot interruption state into
 * the PSCB, then redirect the trapping frame to the guest's handler.
 * The statement order below mirrors the hardware sequence — do not
 * reorder casually.
 */
221 void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
222 {
223 struct vcpu *v = current;
/* Nested delivery with collection off is only legal for TLB misses. */
225 if (!PSCB(v,interrupt_collection_enabled))
226 check_bad_nested_interruption(isr,regs,vector);
227 PSCB(v,unat) = regs->ar_unat; // not sure if this is really needed?
228 PSCB(v,precover_ifs) = regs->cr_ifs;
/* Switch the guest to register bank 0, as hardware does on delivery. */
229 vcpu_bsw0(v);
230 PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
231 PSCB(v,isr) = isr;
232 PSCB(v,iip) = regs->cr_iip;
233 PSCB(v,ifs) = 0;
234 PSCB(v,incomplete_regframe) = 0;
/* Resume at the guest IVT entry for this vector (entries are 0x100-
 * aligned, hence the mask). */
236 regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
237 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
/* r31 hands the guest the address of its shared interruption state. */
238 regs->r31 = XSI_IPSR;
/* Block further event upcalls and interruption collection until the
 * guest handler re-enables them. */
240 v->vcpu_info->evtchn_upcall_mask = 1;
241 PSCB(v,interrupt_collection_enabled) = 0;
243 inc_slow_reflect_count(vector);
244 }
/* Deliberately empty — presumably a debugger breakpoint anchor; confirm. */
246 void foodpi(void) {}
/* Times deliver_pending_interrupt() found pending_interruption set with
 * nothing actually deliverable. */
248 static unsigned long pending_false_positive = 0;
/*
 * Reflect an external interrupt into the current vcpu via its EXTINT
 * IVT entry.  'isr' carries only the restart-instruction (ri) bits of
 * the trapping psr, matching what hardware reports for extints.
 */
250 void reflect_extint(struct pt_regs *regs)
251 {
252 unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
253 struct vcpu *v = current;
254 static int first_extint = 1;
/* One-shot boot diagnostic. */
256 if (first_extint) {
257 printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
258 first_extint = 0;
259 }
260 if (vcpu_timer_pending_early(v))
261 printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
262 PSCB(current,itir) = 0;
263 reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
264 }
/*
 * Deliver a pending event-channel upcall to the current vcpu by
 * redirecting the trapping frame to v->arch.event_callback_ip.
 * Mirrors the state-save sequence of reflect_interruption() but jumps
 * to the registered callback rather than an IVT entry.
 */
266 void reflect_event(struct pt_regs *regs)
267 {
268 unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
269 struct vcpu *v = current;
271 /* Sanity check */
272 if (is_idle_vcpu(v) || !user_mode(regs)) {
273 //printk("WARN: invocation to reflect_event in nested xen\n");
274 return;
275 }
/* Nothing to do if no event is pending. */
277 if (!event_pending(v))
278 return;
280 if (!PSCB(v,interrupt_collection_enabled))
281 printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
282 regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
283 PSCB(v,unat) = regs->ar_unat; // not sure if this is really needed?
284 PSCB(v,precover_ifs) = regs->cr_ifs;
285 vcpu_bsw0(v);
286 PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
287 PSCB(v,isr) = isr;
288 PSCB(v,iip) = regs->cr_iip;
289 PSCB(v,ifs) = 0;
290 PSCB(v,incomplete_regframe) = 0;
/* Resume at the guest's registered event callback. */
292 regs->cr_iip = v->arch.event_callback_ip;
293 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
294 regs->r31 = XSI_IPSR;
/* Mask upcalls and collection until the guest re-enables them. */
296 v->vcpu_info->evtchn_upcall_mask = 1;
297 PSCB(v,interrupt_collection_enabled) = 0;
298 }
300 // ONLY gets called from ia64_leave_kernel
301 // ONLY call with interrupts disabled?? (else might miss one?)
302 // NEVER successful if already reflecting a trap/fault because psr.i==0
/* On the exit path back to a (non-idle, user-mode) guest, reflect any
 * deliverable interrupt; otherwise count the false positive. */
303 void deliver_pending_interrupt(struct pt_regs *regs)
304 {
305 struct domain *d = current->domain;
306 struct vcpu *v = current;
307 // FIXME: Will this work properly if doing an RFI???
308 if (!is_idle_domain(d) && user_mode(regs)) {
309 if (vcpu_deliverable_interrupts(v))
310 reflect_extint(regs);
311 else if (PSCB(v,pending_interruption))
312 ++pending_false_positive;
313 }
314 }
/* Times a fault was resolved by the lazy-cover retry below. */
315 unsigned long lazy_cover_count = 0;
/*
 * If interruption collection is off, stash cr.ifs in the PSCB, mark the
 * register frame incomplete, clear cr.ifs and ask the caller to retry
 * the faulting instruction.  Returns 1 to retry, 0 if nothing done.
 */
317 static int
318 handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
319 {
320 if (!PSCB(v,interrupt_collection_enabled)) {
321 PSCB(v,ifs) = regs->cr_ifs;
322 PSCB(v,incomplete_regframe) = 1;
323 regs->cr_ifs = 0;
324 lazy_cover_count++;
325 return(1); // retry same instruction with cr.ifs off
326 }
327 return(0);
328 }
/*
 * TLB-miss / page-fault handler.  Tries, in order: lazy cover, the
 * speculative-load short-circuit, a guest translation (inserting the
 * result into the machine TLB), a Xen-internal exception fixup, and
 * finally reflection of the fault to the guest.
 */
330 void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
331 {
332 unsigned long iip = regs->cr_iip, iha;
333 // FIXME should validate address here
334 unsigned long pteval;
335 unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
336 IA64FAULT fault;
338 if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
339 if ((isr & IA64_ISR_SP)
340 || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
341 {
342 /*
343 * This fault was due to a speculative load or lfetch.fault, set the "ed"
344 * bit in the psr to ensure forward progress. (Target register will get a
345 * NaT for ld.s, lfetch will be canceled.)
346 */
347 ia64_psr(regs)->ed = 1;
348 return;
349 }
351 again:
/* Look the address up in the guest's TLB/VHPT state. */
352 fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
353 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
354 u64 logps;
355 pteval = translate_domain_pte(pteval, address, itir, &logps);
356 vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
/* Race: the matching dtlb entry may have been purged between the
 * translate and the insert — undo and retry from scratch. */
357 if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
358 /* dtlb has been purged in-between. This dtlb was
359 matching. Undo the work. */
360 vcpu_flush_tlb_vhpt_range (address, 1);
361 goto again;
362 }
363 return;
364 }
/* Fault raised by Xen itself: rely on the exception fixup table. */
366 if (!user_mode (regs)) {
367 /* The fault occurs inside Xen. */
368 if (!ia64_done_with_exception(regs)) {
369 // should never happen. If it does, region 0 addr may
370 // indicate a bad xen pointer
371 printk("*** xen_handle_domain_access: exception table"
372 " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
373 iip, address);
374 panic_domain(regs,"*** xen_handle_domain_access: exception table"
375 " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
376 iip, address);
377 }
378 return;
379 }
/* Guest faulted with collection off: deliver the nested-TLB vector
 * directly, bypassing the usual PSCB state save. */
380 if (!PSCB(current,interrupt_collection_enabled)) {
381 check_bad_nested_interruption(isr,regs,fault);
382 //printf("Delivering NESTED DATA TLB fault\n");
383 fault = IA64_DATA_NESTED_TLB_VECTOR;
384 regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
385 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
386 // NOTE: nested trap must NOT pass PSCB address
387 //regs->r31 = (unsigned long) &PSCB(current);
388 inc_slow_reflect_count(fault);
389 return;
390 }
/* Normal case: record fault details and reflect to the guest. */
392 PSCB(current,itir) = itir;
393 PSCB(current,iha) = iha;
394 PSCB(current,ifa) = address;
395 reflect_interruption(isr, regs, fault);
396 }
/* FPSWA (Floating-Point Software Assist) firmware interface; NULL when
 * the firmware provides none. */
398 fpswa_interface_t *fpswa_interface = 0;
/* Boot-time trap setup: resolve the FPSWA interface pointer handed over
 * by the boot loader into a usable virtual address. */
400 void trap_init (void)
401 {
402 if (ia64_boot_param->fpswa)
403 /* FPSWA fixup: make the interface pointer a virtual address: */
404 fpswa_interface = __va(ia64_boot_param->fpswa);
405 else
406 printk("No FPSWA supported.\n");
407 }
/*
 * Invoke the FPSWA firmware to emulate the faulting/trapping FP bundle.
 * Returns the firmware's fpswa_ret_t; {-1,0,0,0} if no FPSWA firmware
 * is present.  Only f6-f11 are made available to the firmware (the only
 * FP registers saved in pt_regs).
 */
409 static fpswa_ret_t
410 fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
411 struct pt_regs *regs)
412 {
413 fp_state_t fp_state;
414 fpswa_ret_t ret;
416 if (!fpswa_interface)
417 return ((fpswa_ret_t) {-1, 0, 0, 0});
419 memset(&fp_state, 0, sizeof(fp_state_t));
421 /*
422 * compute fp_state. only FP registers f6 - f11 are used by the
423 * kernel, so set those bits in the mask and set the low volatile
424 * pointer to point to these registers.
425 */
426 fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
428 fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
429 /*
430 * unsigned long (*EFI_FPSWA) (
431 * unsigned long trap_type,
432 * void *Bundle,
433 * unsigned long *pipsr,
434 * unsigned long *pfsr,
435 * unsigned long *pisr,
436 * unsigned long *ppreds,
437 * unsigned long *pifs,
438 * void *fp_state);
439 */
440 ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
441 (unsigned long *) ipsr, (unsigned long *) fpsr,
442 (unsigned long *) isr, (unsigned long *) pr,
443 (unsigned long *) ifs, &fp_state);
445 return ret;
446 }
448 /*
449 * Handle floating-point assist faults and traps for domain.
 *
 * fp_fault != 0: FP fault (instruction not yet executed); on successful
 * emulation the guest iip is advanced past it.
 * fp_fault == 0: FP trap (instruction already completed).
 * Returns ret.status from the firmware (0 on success), or -1 when the
 * faulting bundle cannot be fetched from the guest.
450 */
451 static unsigned long
452 handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
453 {
454 struct vcpu *v = current;
455 IA64_BUNDLE bundle;
456 IA64_BUNDLE __get_domain_bundle(UINT64);
457 unsigned long fault_ip;
458 fpswa_ret_t ret;
460 fault_ip = regs->cr_iip;
461 /*
462 * When the FP trap occurs, the trapping instruction is completed.
463 * If ipsr.ri == 0, there is the trapping instruction in previous bundle.
464 */
465 if (!fp_fault && (ia64_psr(regs)->ri == 0))
466 fault_ip -= 16;
467 bundle = __get_domain_bundle(fault_ip);
/* An all-zero bundle is used as the "unmapped" sentinel. */
468 if (!bundle.i64[0] && !bundle.i64[1]) {
469 printk("%s: floating-point bundle at 0x%lx not mapped\n",
470 __FUNCTION__, fault_ip);
471 return -1;
472 }
474 ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
475 &isr, &regs->pr, &regs->cr_ifs, regs);
477 if (ret.status) {
/* Preserve the failure for the guest to inspect (see PSCBX). */
478 PSCBX(v, fpswa_ret) = ret;
479 printk("%s(%s): fp_emulate() returned %ld\n",
480 __FUNCTION__, fp_fault?"fault":"trap", ret.status);
481 } else {
482 if (fp_fault) {
483 /* emulation was successful */
484 vcpu_increment_iip(v);
485 }
486 }
488 return ret.status;
489 }
/*
 * Last-resort handler for faults taken inside Xen itself (entered from
 * the low-level fault stubs with the saved frame on the stack).  Logs
 * what happened and panics, except for two recoverable cases: a
 * cancelled lfetch.fault and an instruction hazard (vector 24, code 8).
 */
491 void
492 ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
493 unsigned long iim, unsigned long itir, unsigned long arg5,
494 unsigned long arg6, unsigned long arg7, unsigned long stack)
495 {
/* The register frame was pushed just above 'stack' by the stub. */
496 struct pt_regs *regs = (struct pt_regs *) &stack;
497 unsigned long code;
498 char buf[128];
/* Indexed by General Exception code (isr bits [7:4], 16 entries). */
499 static const char *reason[] = {
500 "IA-64 Illegal Operation fault",
501 "IA-64 Privileged Operation fault",
502 "IA-64 Privileged Register fault",
503 "IA-64 Reserved Register/Field fault",
504 "Disabled Instruction Set Transition fault",
505 "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
506 "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
507 "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
508 };
510 printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
511 vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
514 if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
515 /*
516 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
517 * the lfetch.
518 */
519 ia64_psr(regs)->ed = 1;
520 printf("ia64_fault: handled lfetch.fault\n");
521 return;
522 }
/* Everything below just names the vector for the panic message. */
524 switch (vector) {
525 case 0:
526 printk("VHPT Translation.\n");
527 break;
529 case 4:
530 printk("Alt DTLB.\n");
531 break;
533 case 6:
534 printk("Instruction Key Miss.\n");
535 break;
537 case 7:
538 printk("Data Key Miss.\n");
539 break;
541 case 8:
542 printk("Dirty-bit.\n");
543 break;
545 case 20:
546 printk("Page Not Found.\n");
547 break;
549 case 21:
550 printk("Key Permission.\n");
551 break;
553 case 22:
554 printk("Instruction Access Rights.\n");
555 break;
557 case 24: /* General Exception */
558 code = (isr >> 4) & 0xf;
559 sprintf(buf, "General Exception: %s%s", reason[code],
560 (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
561 " (data access)") : "");
/* Illegal hazards are survivable: report and resume. */
562 if (code == 8) {
563 # ifdef CONFIG_IA64_PRINT_HAZARDS
564 printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
565 current->comm, current->pid,
566 regs->cr_iip + ia64_psr(regs)->ri,
567 regs->pr);
568 # endif
569 printf("ia64_fault: returning on hazard\n");
570 return;
571 }
572 break;
574 case 25:
575 printk("Disabled FP-Register.\n");
576 break;
578 case 26:
579 printk("NaT consumption.\n");
580 break;
582 case 29:
583 printk("Debug.\n");
584 break;
586 case 30:
587 printk("Unaligned Reference.\n");
588 break;
590 case 31:
591 printk("Unsupported data reference.\n");
592 break;
594 case 32:
595 printk("Floating-Point Fault.\n");
596 break;
598 case 33:
599 printk("Floating-Point Trap.\n");
600 break;
602 case 34:
603 printk("Lower Privilege Transfer Trap.\n");
604 break;
606 case 35:
607 printk("Taken Branch Trap.\n");
608 break;
610 case 36:
611 printk("Single Step Trap.\n");
612 break;
614 case 45:
615 printk("IA-32 Exception.\n");
616 break;
618 case 46:
619 printk("IA-32 Intercept.\n");
620 break;
622 case 47:
623 printk("IA-32 Interrupt.\n");
624 break;
626 default:
627 printk("Fault %lu\n", vector);
628 break;
629 }
631 show_registers(regs);
632 panic("Fault in Xen.\n");
633 }
635 unsigned long running_on_sim = 0;
637 void
638 do_ssc(unsigned long ssc, struct pt_regs *regs)
639 {
640 unsigned long arg0, arg1, arg2, arg3, retval;
641 char buf[2];
642 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
643 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
644 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
646 arg0 = vcpu_get_gr(current,32);
647 switch(ssc) {
648 case SSC_PUTCHAR:
649 buf[0] = arg0;
650 buf[1] = '\0';
651 printf(buf);
652 break;
653 case SSC_GETCHAR:
654 retval = ia64_ssc(0,0,0,0,ssc);
655 vcpu_set_gr(current,8,retval,0);
656 break;
657 case SSC_WAIT_COMPLETION:
658 if (arg0) { // metaphysical address
660 arg0 = translate_domain_mpaddr(arg0);
661 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
662 ///**/ if (stat->fd == last_fd) stat->count = last_count;
663 /**/ stat->count = last_count;
664 //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
665 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
666 /**/ retval = 0;
667 }
668 else retval = -1L;
669 vcpu_set_gr(current,8,retval,0);
670 break;
671 case SSC_OPEN:
672 arg1 = vcpu_get_gr(current,33); // access rights
673 if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring...)\n"); arg0 = 0; }
674 if (arg0) { // metaphysical address
675 arg0 = translate_domain_mpaddr(arg0);
676 retval = ia64_ssc(arg0,arg1,0,0,ssc);
677 }
678 else retval = -1L;
679 vcpu_set_gr(current,8,retval,0);
680 break;
681 case SSC_WRITE:
682 case SSC_READ:
683 //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
684 arg1 = vcpu_get_gr(current,33);
685 arg2 = vcpu_get_gr(current,34);
686 arg3 = vcpu_get_gr(current,35);
687 if (arg2) { // metaphysical address of descriptor
688 struct ssc_disk_req *req;
689 unsigned long mpaddr;
690 long len;
692 arg2 = translate_domain_mpaddr(arg2);
693 req = (struct ssc_disk_req *) __va(arg2);
694 req->len &= 0xffffffffL; // avoid strange bug
695 len = req->len;
696 /**/ last_fd = arg1;
697 /**/ last_count = len;
698 mpaddr = req->addr;
699 //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
700 retval = 0;
701 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
702 // do partial page first
703 req->addr = translate_domain_mpaddr(mpaddr);
704 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
705 len -= req->len; mpaddr += req->len;
706 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
707 arg3 += req->len; // file offset
708 /**/ last_stat.fd = last_fd;
709 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
710 //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
711 }
712 if (retval >= 0) while (len > 0) {
713 req->addr = translate_domain_mpaddr(mpaddr);
714 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
715 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
716 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
717 arg3 += req->len; // file offset
718 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
719 /**/ last_stat.fd = last_fd;
720 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
721 //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
722 }
723 // set it back to the original value
724 req->len = last_count;
725 }
726 else retval = -1L;
727 vcpu_set_gr(current,8,retval,0);
728 //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
729 break;
730 case SSC_CONNECT_INTERRUPT:
731 arg1 = vcpu_get_gr(current,33);
732 arg2 = vcpu_get_gr(current,34);
733 arg3 = vcpu_get_gr(current,35);
734 if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n"); break; }
735 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
736 break;
737 case SSC_NETDEV_PROBE:
738 vcpu_set_gr(current,8,-1L,0);
739 break;
740 default:
741 printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
742 ssc, regs->cr_iip, regs->b0);
743 while(1);
744 break;
745 }
746 vcpu_increment_iip(current);
747 }
749 /* Also read in hyperprivop.S */
/* One-shot latch for the simulator probe below. */
750 int first_break = 1;
/*
 * break-instruction dispatcher.  The immediate selects the service:
 * Ski SSC (0x80001/0x80002), crash-debugger entry, the domain's
 * hypercall immediate, a hyperprivop (when psr.ic is off), or plain
 * reflection of the break to the guest.
 */
752 void
753 ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
754 {
755 struct domain *d = current->domain;
756 struct vcpu *v = current;
757 IA64FAULT vector;
/* Detect the HP Ski simulator on the very first break. */
759 if (first_break) {
760 if (platform_is_hp_ski()) running_on_sim = 1;
761 else running_on_sim = 0;
762 first_break = 0;
763 }
764 if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
/* Ski SSC request; the SSC number is in guest r36. */
765 do_ssc(vcpu_get_gr(current,36), regs);
766 }
767 #ifdef CRASH_DEBUG
768 else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
769 if (iim == 0)
770 show_registers(regs);
771 debugger_trap_fatal(0 /* don't care */, regs);
772 }
773 #endif
774 else if (iim == d->arch.breakimm) {
775 /* by default, do not continue */
776 v->arch.hypercall_continuation = 0;
/* Only advance iip when the hypercall did not ask to be re-issued
 * (continuation set by hypercall_create_continuation()). */
778 if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
779 if (!PSCBX(v, hypercall_continuation))
780 vcpu_increment_iip(current);
781 }
782 else reflect_interruption(isr, regs, vector);
783 }
784 else if (!PSCB(v,interrupt_collection_enabled)) {
785 if (ia64_hyperprivop(iim,regs))
786 vcpu_increment_iip(current);
787 }
788 else {
789 if (iim == 0)
790 die_if_kernel("bug check", regs, iim);
791 PSCB(v,iim) = iim;
792 reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
793 }
794 }
/*
 * Privileged-operation fault: try to emulate the instruction on the
 * guest's behalf; reflect the fault to the guest only when emulation
 * itself raises one (RFI-in-progress is a successful outcome).
 */
796 void
797 ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
798 {
799 IA64FAULT vector;
801 vector = priv_emulate(current,regs,isr);
802 if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
803 // Note: if a path results in a vector to reflect that requires
804 // iha/itir (e.g. vcpu_force_data_miss), they must be set there
805 reflect_interruption(isr,regs,vector);
806 }
807 }
809 /* Used in vhpt.h. */
/* Per-type interruption counters; indices are defined by the consumers
 * in vhpt.h. */
810 #define INTR_TYPE_MAX 10
811 UINT64 int_counts[INTR_TYPE_MAX];
/*
 * Map a hardware interruption vector number to the guest-visible IVT
 * offset and reflect it, after Xen-side special cases: privop-
 * masquerading NaT faults, null-pointer NaT consumption, and FPSWA
 * emulation of FP faults/traps.
 */
813 void
814 ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
815 {
816 struct vcpu *v = current;
817 unsigned long check_lazy_cover = 0;
818 unsigned long psr = regs->cr_ipsr;
820 /* Following faults shouldn't be seen from Xen itself */
821 if (!(psr & IA64_PSR_CPL)) BUG();
823 switch(vector) {
824 case 8:
825 vector = IA64_DIRTY_BIT_VECTOR; break;
826 case 9:
827 vector = IA64_INST_ACCESS_BIT_VECTOR; break;
828 case 10:
829 check_lazy_cover = 1;
830 vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
831 case 20:
832 check_lazy_cover = 1;
833 vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
834 case 22:
835 vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
836 case 23:
837 check_lazy_cover = 1;
838 vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
839 case 25:
840 vector = IA64_DISABLED_FPREG_VECTOR;
841 break;
842 case 26:
/* isr code 1 == register NaT consumption: reflect directly. */
843 if (((isr >> 4L) & 0xfL) == 1) {
844 //regs->eml_unat = 0; FIXME: DO WE NEED THIS??
845 printf("ia64_handle_reflection: handling regNaT fault");
846 vector = IA64_NAT_CONSUMPTION_VECTOR; break;
847 }
848 #if 1
849 // pass null pointer dereferences through with no error
850 // but retain debug output for non-zero ifa
851 if (!ifa) {
852 vector = IA64_NAT_CONSUMPTION_VECTOR; break;
853 }
854 #endif
855 printf("*** NaT fault... attempting to handle as privop\n");
856 printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
857 isr, ifa, regs->cr_iip, psr);
858 //regs->eml_unat = 0; FIXME: DO WE NEED THIS???
859 // certain NaT faults are higher priority than privop faults
860 vector = priv_emulate(v,regs,isr);
861 if (vector == IA64_NO_FAULT) {
862 printf("*** Handled privop masquerading as NaT fault\n");
863 return;
864 }
865 vector = IA64_NAT_CONSUMPTION_VECTOR; break;
866 case 27:
867 //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
868 PSCB(current,iim) = iim;
869 vector = IA64_SPECULATION_VECTOR; break;
870 case 30:
871 // FIXME: Should we handle unaligned refs in Xen??
872 vector = IA64_UNALIGNED_REF_VECTOR; break;
/* FP fault/trap: try FPSWA emulation first; reflect only on failure. */
873 case 32:
874 if (!(handle_fpu_swa(1, regs, isr))) return;
875 printf("ia64_handle_reflection: handling FP fault\n");
876 vector = IA64_FP_FAULT_VECTOR; break;
877 case 33:
878 if (!(handle_fpu_swa(0, regs, isr))) return;
879 printf("ia64_handle_reflection: handling FP trap\n");
880 vector = IA64_FP_TRAP_VECTOR; break;
881 case 34:
882 printf("ia64_handle_reflection: handling lowerpriv trap");
883 vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
884 case 35:
885 printf("ia64_handle_reflection: handling taken branch trap");
886 vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
887 case 36:
888 printf("ia64_handle_reflection: handling single step trap");
889 vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;
891 default:
892 printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
/* Deliberate spin on an unhandled (non-zero) vector. */
893 while(vector);
894 return;
895 }
896 if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
897 PSCB(current,ifa) = ifa;
898 PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
899 reflect_interruption(isr,regs,vector);
900 }
/*
 * Arrange for the current hypercall to be re-executed: re-stage the
 * hypercall number in guest r2 and up to five arguments in r14-r18
 * (format string: 'i' = unsigned int, 'l' = unsigned long, 'h' =
 * handle/pointer), and set the continuation flag so ia64_handle_break()
 * does not advance the guest iip.  Returns the hypercall number.
 * Preemption inside a multicall is not supported yet.
 */
902 unsigned long hypercall_create_continuation(
903 unsigned int op, const char *format, ...)
904 {
905 struct mc_state *mcs = &mc_state[smp_processor_id()];
906 struct vcpu *v = current;
907 const char *p = format;
908 unsigned long arg;
909 unsigned int i;
910 va_list args;
912 va_start(args, format);
913 if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
914 panic("PREEMPT happen in multicall\n"); // Not support yet
915 } else {
916 vcpu_set_gr(v, 2, op, 0);
917 for ( i = 0; *p != '\0'; i++) {
918 switch ( *p++ )
919 {
920 case 'i':
921 arg = (unsigned long)va_arg(args, unsigned int);
922 break;
923 case 'l':
924 arg = (unsigned long)va_arg(args, unsigned long);
925 break;
926 case 'h':
927 arg = (unsigned long)va_arg(args, void *);
928 break;
929 default:
930 arg = 0;
931 BUG();
932 }
/* Argument i goes to guest register r14+i. */
933 switch (i) {
934 case 0: vcpu_set_gr(v, 14, arg, 0);
935 break;
936 case 1: vcpu_set_gr(v, 15, arg, 0);
937 break;
938 case 2: vcpu_set_gr(v, 16, arg, 0);
939 break;
940 case 3: vcpu_set_gr(v, 17, arg, 0);
941 break;
942 case 4: vcpu_set_gr(v, 18, arg, 0);
943 break;
944 default: panic("Too many args for hypercall continuation\n");
945 break;
946 }
947 }
948 }
949 v->arch.hypercall_continuation = 1;
950 va_end(args);
951 return op;
952 }