ia64/xen-unstable

xen/arch/ia64/xen/process.c @ 9770:ced37bea0647

[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printfs, ...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:35:41 2006 -0600 (2006-04-25)
parents 14a34d811e81
children 3ab5ab4d6d75
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include "hpsim_ssc.h"
#include <xen/multicall.h>
#include <asm/debugger.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern void panic_domain(struct pt_regs *, const char *, ...);
extern long platform_is_hp_ski(void);
extern int ia64_hyperprivop(unsigned long, REGS *);
extern int ia64_hypercall(struct pt_regs *regs);
extern void vmx_do_launch(struct vcpu *);
extern unsigned long lookup_domain_mpa(struct domain *, unsigned long);

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
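
/*
 * A minimal sketch of how these two masks are meant to be applied (see
 * reflect_interruption() below for the real use):
 *
 *   regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
 *
 * i.e. on delivery through the guest IVT, translation (it/dt/rt),
 * interrupt collection (ic), interrupts (i) and bank 1 (bn) are forced
 * on, the single-step/debug/trap bits are forced off, and the privilege
 * level is forced non-zero via IA64_PSR_CPL1.
 */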

#include <xen/sched-if.h>

void schedule_tail(struct vcpu *prev)
{
    extern char ia64_ivt;
    context_saved(prev);

    if (VMX_DOMAIN(current)) {
        vmx_do_launch(current);
    } else {
        ia64_set_iva(&ia64_ivt);
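        /*
         * Field decode (an assumption based on the architected cr.pta
         * layout, not a comment from the original source): VHPT_ADDR is
         * the base, bit 8 is pta.vf (long-format VHPT), bits 7:2 are
         * pta.size (VHPT_SIZE_LOG2) and bit 0 is pta.ve (VHPT_ENABLED).
         */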
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                     VHPT_ENABLED);
        load_region_regs(current);
        vcpu_load_kernel_regs(current);
    }
}

void tdpfoo(void) { }

// given a domain virtual address, pte and pagesize, extract the metaphysical
// address, convert the pte to a physical pte for the (possibly different)
// Xen PAGE_SIZE and return the modified pte.  (NOTE: TLB insert should use
// PAGE_SIZE!)
u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
{
    struct domain *d = current->domain;
    ia64_itir_t itir = {.itir = itir__};
    u64 mask, mpaddr, pteval2;

    pteval &= ((1UL << 53) - 1);  // ignore [63:53] bits

    // FIXME address had better be pre-validated on insert
    mask = ~itir_mask(itir.itir);
    mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
             (address & mask);
#ifdef CONFIG_XEN_IA64_DOM0_VP
    if (itir.ps > PAGE_SHIFT) {
        itir.ps = PAGE_SHIFT;
    }
#endif
    *logps = itir.ps;
#ifndef CONFIG_XEN_IA64_DOM0_VP
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            /*
            printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
                   mpaddr, ia64_get_itc());
            */
            tdpfoo();
        }
    }
    else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
        /* Address beyond the limit.  However the grant table is
           also beyond the limit.  Display a message if not in the
           grant table.  */
        if (mpaddr < IA64_GRANT_TABLE_PADDR
            || mpaddr >= (IA64_GRANT_TABLE_PADDR
                          + (ORDER_GRANT_FRAMES << PAGE_SHIFT)))
            printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
                   "vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
                   mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
                   address, pteval, itir.itir);
        tdpfoo();
    }
#endif
    pteval2 = lookup_domain_mpa(d, mpaddr);
    pteval2 &= _PAGE_PPN_MASK;  // ignore non-addr bits
    pteval2 |= (pteval & _PAGE_ED);
    pteval2 |= _PAGE_PL_2;      // force PL0->2 (PL3 is unaffected)
    pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    return pteval2;
}
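
/*
 * Usage sketch for translate_domain_pte(), mirroring the fault path in
 * ia64_do_page_fault() below:
 *
 *   u64 logps;
 *   fault = vcpu_translate(current, address, is_data, 0, &pteval, &itir, &iha);
 *   if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
 *       pteval = translate_domain_pte(pteval, address, itir, &logps);
 *       vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, logps);
 *   }
 *
 * The guest pte is rewritten from metaphysical to physical before the
 * TLB/VHPT insert, with logps clamped (under CONFIG_XEN_IA64_DOM0_VP)
 * to Xen's PAGE_SHIFT per the NOTE above.
 */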

// given a current domain metaphysical address, return the physical address
unsigned long translate_domain_mpaddr(unsigned long mpaddr)
{
    unsigned long pteval;

#ifndef CONFIG_XEN_IA64_DOM0_VP
    if (current->domain == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
                   mpaddr);
            tdpfoo();
        }
    }
#endif
    pteval = lookup_domain_mpa(current->domain, mpaddr);
    return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
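
/*
 * Usage sketch for translate_domain_mpaddr(), taken from do_ssc() below:
 *
 *   arg0 = translate_domain_mpaddr(arg0);
 *   stat = (struct ssc_disk_stat *)__va(arg0);
 *
 * Consecutive metaphysical pages need not be physically contiguous, so
 * multi-page buffers must be translated one page at a time (see the
 * SSC_WRITE/SSC_READ loop in do_ssc()).
 */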

unsigned long slow_reflect_count[0x80] = { 0 };
unsigned long fast_reflect_count[0x80] = { 0 };

#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;

void zero_reflect_counts(void)
{
    int i;
    for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
    for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
}

int dump_reflect_counts(char *buf)
{
    int i, j, cnt;
    char *s = buf;

    s += sprintf(s,"Slow reflections by vector:\n");
    for (i = 0, j = 0; i < 0x80; i++) {
        if ( (cnt = slow_reflect_count[i]) != 0 ) {
            s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
            if ((j++ & 3) == 3) s += sprintf(s,"\n");
        }
    }
    if (j & 3) s += sprintf(s,"\n");
    s += sprintf(s,"Fast reflections by vector:\n");
    for (i = 0, j = 0; i < 0x80; i++) {
        if ( (cnt = fast_reflect_count[i]) != 0 ) {
            s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
            if ((j++ & 3) == 3) s += sprintf(s,"\n");
        }
    }
    if (j & 3) s += sprintf(s,"\n");
    return s - buf;
}

// should never panic domain... if it does, stack may have been overrun
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
        panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
    }
    vector &= ~0xf;
    if (vector != IA64_DATA_TLB_VECTOR &&
        vector != IA64_ALT_DATA_TLB_VECTOR &&
        vector != IA64_VHPT_TRANS_VECTOR) {
        panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%p,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
                     vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
    }
}

void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!PSCB(v,interrupt_collection_enabled))
        check_bad_nested_interruption(isr,regs,vector);
    PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    PSCB(v,isr) = isr;
    PSCB(v,iip) = regs->cr_iip;
    PSCB(v,ifs) = 0;
    PSCB(v,incomplete_regframe) = 0;

    regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    regs->r31 = XSI_IPSR;

    v->vcpu_info->evtchn_upcall_mask = 1;
    PSCB(v,interrupt_collection_enabled) = 0;

    inc_slow_reflect_count(vector);
}
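
/*
 * In short, reflect_interruption() emulates what the processor does on
 * an interruption: the guest's interruption state (ipsr/isr/iip) is
 * captured in the shared PSCB, bank 0 registers are switched in
 * (vcpu_bsw0), cr.iip is redirected to the vector's 256-byte-aligned
 * slot in the guest IVT, psr is rewritten with DELIVER_PSR_SET/CLR, and
 * event delivery plus interrupt collection are masked, just as psr.ic=0
 * would be on real hardware.
 */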

void foodpi(void) {}

static unsigned long pending_false_positive = 0;

void reflect_extint(struct pt_regs *regs)
{
    unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    struct vcpu *v = current;
    static int first_extint = 1;

    if (first_extint) {
        printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
        first_extint = 0;
    }
    if (vcpu_timer_pending_early(v))
        printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
    PSCB(current,itir) = 0;
    reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d) && user_mode(regs)) {
        //vcpu_poke_timer(v);
        if (vcpu_deliverable_interrupts(v))
            reflect_extint(regs);
        else if (PSCB(v,pending_interruption))
            ++pending_false_positive;
    }
}
unsigned long lazy_cover_count = 0;

int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
{
    if (!PSCB(v,interrupt_collection_enabled)) {
        PSCB(v,ifs) = regs->cr_ifs;
        PSCB(v,incomplete_regframe) = 1;
        regs->cr_ifs = 0;
        lazy_cover_count++;
        return(1);  // retry same instruction with cr.ifs off
    }
    return(0);
}
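
/*
 * Note on "lazy cover" (descriptive, based on the code above): callers
 * invoke this when a fault is taken with ISR.ir (incomplete register
 * frame) set while the guest has interrupt collection disabled, i.e.
 * while it is inside a low-level handler that has not yet executed
 * cover.  handle_lazy_cover() stashes cr.ifs in PSCB.ifs, flags the
 * register frame as incomplete, clears cr.ifs and retries the
 * instruction, mimicking the effect the guest's own cover would have.
 */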

void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    unsigned long iip = regs->cr_iip, iha;
    // FIXME should validate address here
    unsigned long pteval;
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    IA64FAULT fault;

    if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault, set the "ed"
         * bit in the psr to ensure forward progress.  (Target register will get a
         * NaT for ld.s, lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }

 again:
    fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
    if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
        u64 logps;
        pteval = translate_domain_pte(pteval, address, itir, &logps);
        vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
        if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
            /* dtlb has been purged in-between.  This dtlb was
               matching.  Undo the work.  */
#ifdef VHPT_GLOBAL
            vhpt_flush_address (address, 1);
#endif
            ia64_ptcl(address, 1<<2);
            ia64_srlz_i();
            goto again;
        }
        return;
    }

    if (!user_mode (regs)) {
        /* The fault occurs inside Xen.  */
        if (!ia64_done_with_exception(regs)) {
            // should never happen.  If it does, region 0 addr may
            // indicate a bad xen pointer
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                   iip, address);
            panic_domain(regs,"*** xen_handle_domain_access: exception table"
                         " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                         iip, address);
        }
        return;
    }
    if (!PSCB(current,interrupt_collection_enabled)) {
        check_bad_nested_interruption(isr,regs,fault);
        //printf("Delivering NESTED DATA TLB fault\n");
        fault = IA64_DATA_NESTED_TLB_VECTOR;
        regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        // NOTE: nested trap must NOT pass PSCB address
        //regs->r31 = (unsigned long) &PSCB(current);
        inc_slow_reflect_count(fault);
        return;
    }

    PSCB(current,itir) = itir;
    PSCB(current,iha) = iha;
    PSCB(current,ifa) = address;
    reflect_interruption(isr, regs, fault);
}

void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
            unsigned long iim, unsigned long itir, unsigned long arg5,
            unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code;
    char buf[128];
    static const char * const reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
        "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
    };
#if 0
    printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
           vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
#endif

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
         * the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
    case 24: /* General Exception */
        code = (isr >> 4) & 0xf;
        sprintf(buf, "General Exception: %s%s", reason[code],
                (code == 3) ? ((isr & (1UL << 37))
                               ? " (RSE access)" : " (data access)") : "");
        if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
            printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                   current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
                   regs->pr);
# endif
            printf("ia64_fault: returning on hazard\n");
            return;
        }
        break;

    case 25: /* Disabled FP-Register */
        if (isr & 2) {
            //disabled_fph_fault(regs);
            //return;
        }
        sprintf(buf, "Disabled FPL fault---not supposed to happen!");
        break;

    case 26: /* NaT Consumption */
        if (user_mode(regs)) {
            void *addr;

            if (((isr >> 4) & 0xf) == 2) {
                /* NaT page consumption */
                //sig = SIGSEGV;
                //code = SEGV_ACCERR;
                addr = (void *) ifa;
            } else {
                /* register NaT consumption */
                //sig = SIGILL;
                //code = ILL_ILLOPN;
                addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            }
            //siginfo.si_signo = sig;
            //siginfo.si_code = code;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = addr;
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(sig, &siginfo, current);
            //return;
        } //else if (ia64_done_with_exception(regs))
            //return;
        sprintf(buf, "NaT consumption");
        break;

    case 31: /* Unsupported Data Reference */
        if (user_mode(regs)) {
            //siginfo.si_signo = SIGILL;
            //siginfo.si_code = ILL_ILLOPN;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(SIGILL, &siginfo, current);
            //return;
        }
        sprintf(buf, "Unsupported data reference");
        break;

    case 29: /* Debug */
    case 35: /* Taken Branch Trap */
    case 36: /* Single Step Trap */
        //if (fsys_mode(current, regs)) {}
        switch (vector) {
        case 29:
            //siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
            /*
             * Erratum 10 (IFA may contain incorrect address) now has
             * "NoFix" status.  There are no plans for fixing this.
             */
            if (ia64_psr(regs)->is == 0)
                ifa = regs->cr_iip;
#endif
            break;
        case 35: ifa = 0; break;
        case 36: ifa = 0; break;
        //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
        //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
        }
        //siginfo.si_signo = SIGTRAP;
        //siginfo.si_errno = 0;
        //siginfo.si_addr = (void *) ifa;
        //siginfo.si_imm = 0;
        //siginfo.si_flags = __ISR_VALID;
        //siginfo.si_isr = isr;
        //force_sig_info(SIGTRAP, &siginfo, current);
        //return;

    case 32: /* fp fault */
    case 33: /* fp trap */
        //result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
        //if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
            //siginfo.si_signo = SIGFPE;
            //siginfo.si_errno = 0;
            //siginfo.si_code = FPE_FLTINV;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //siginfo.si_imm = 0;
            //force_sig_info(SIGFPE, &siginfo, current);
        //}
        //return;
        sprintf(buf, "FP fault/trap");
        break;

    case 34:
        if (isr & 0x2) {
            /* Lower-Privilege Transfer Trap */
            /*
             * Just clear PSR.lp and then return immediately: all the
             * interesting work (e.g., signal delivery is done in the kernel
             * exit path).
             */
            //ia64_psr(regs)->lp = 0;
            //return;
            sprintf(buf, "Lower-Privilege Transfer trap");
        } else {
            /* Unimplemented Instr. Address Trap */
            if (user_mode(regs)) {
                //siginfo.si_signo = SIGILL;
                //siginfo.si_code = ILL_BADIADDR;
                //siginfo.si_errno = 0;
                //siginfo.si_flags = 0;
                //siginfo.si_isr = 0;
                //siginfo.si_imm = 0;
                //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
                //force_sig_info(SIGILL, &siginfo, current);
                //return;
            }
            sprintf(buf, "Unimplemented Instruction Address fault");
        }
        break;

    case 45:
        printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
               regs->cr_iip, ifa, isr);
        //force_sig(SIGSEGV, current);
        break;

    case 46:
        printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
               regs->cr_iip, ifa, isr, iim);
        //force_sig(SIGSEGV, current);
        return;

    case 47:
        sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
        break;

    default:
        sprintf(buf, "Fault %lu", vector);
        break;
    }
    //die_if_kernel(buf, regs, error);
    printk("ia64_fault: %s: reflecting\n",buf);
    PSCB(current,itir) = vcpu_get_itir_on_fault(current,ifa);
    PSCB(current,ifa) = ifa;
    reflect_interruption(isr,regs,IA64_GENEX_VECTOR);
    //while(1);
    //force_sig(SIGILL, current);
}

unsigned long running_on_sim = 0;

void
do_ssc(unsigned long ssc, struct pt_regs *regs)
{
    unsigned long arg0, arg1, arg2, arg3, retval;
    char buf[2];
/**/    static int last_fd, last_count;  // FIXME FIXME FIXME
/**/    // BROKEN FOR MULTIPLE DOMAINS & SMP
/**/    struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;

    arg0 = vcpu_get_gr(current,32);
    switch(ssc) {
    case SSC_PUTCHAR:
        buf[0] = arg0;
        buf[1] = '\0';
        printf("%s", buf);  // print as data, never as a format string
        break;
    case SSC_GETCHAR:
        retval = ia64_ssc(0,0,0,0,ssc);
        vcpu_set_gr(current,8,retval,0);
        break;
    case SSC_WAIT_COMPLETION:
        if (arg0) {  // metaphysical address

            arg0 = translate_domain_mpaddr(arg0);
/**/        stat = (struct ssc_disk_stat *)__va(arg0);
///**/      if (stat->fd == last_fd) stat->count = last_count;
/**/        stat->count = last_count;
            //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
///**/      retval = ia64_ssc(arg0,0,0,0,ssc);
/**/        retval = 0;
        }
        else retval = -1L;
        vcpu_set_gr(current,8,retval,0);
        break;
    case SSC_OPEN:
        arg1 = vcpu_get_gr(current,33);  // access rights
        if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
            retval = ia64_ssc(arg0,arg1,0,0,ssc);
        }
        else retval = -1L;
        vcpu_set_gr(current,8,retval,0);
        break;
    case SSC_WRITE:
    case SSC_READ:
        //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
        arg1 = vcpu_get_gr(current,33);
        arg2 = vcpu_get_gr(current,34);
        arg3 = vcpu_get_gr(current,35);
        if (arg2) {  // metaphysical address of descriptor
            struct ssc_disk_req *req;
            unsigned long mpaddr;
            long len;

            arg2 = translate_domain_mpaddr(arg2);
            req = (struct ssc_disk_req *) __va(arg2);
            req->len &= 0xffffffffL;  // avoid strange bug
            len = req->len;
/**/        last_fd = arg1;
/**/        last_count = len;
            mpaddr = req->addr;
            //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
            retval = 0;
            if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
                // do partial page first
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
                len -= req->len; mpaddr += req->len;
                retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
                arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
            }
            if (retval >= 0) while (len > 0) {
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
                len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
                retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
                arg3 += req->len;  // file offset
                // TEMP REMOVED AGAIN arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
            }
            // set it back to the original value
            req->len = last_count;
        }
        else retval = -1L;
        vcpu_set_gr(current,8,retval,0);
        //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
        break;
    case SSC_CONNECT_INTERRUPT:
        arg1 = vcpu_get_gr(current,33);
        arg2 = vcpu_get_gr(current,34);
        arg3 = vcpu_get_gr(current,35);
        if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
        (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
        break;
    case SSC_NETDEV_PROBE:
        vcpu_set_gr(current,8,-1L,0);
        break;
    default:
        printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
               ssc, regs->cr_iip, regs->b0);
        while(1);
        break;
    }
    vcpu_increment_iip(current);
}
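
/*
 * Worked example for the SSC_READ/SSC_WRITE splitting above (assuming
 * 16KB pages, i.e. PAGE_SIZE == 0x4000): a request with addr=0x13f00,
 * len=0x300 crosses a page boundary, so the partial-page branch first
 * transfers 0x100 bytes (0x13f00..0x13fff), then the loop transfers the
 * remaining 0x200 bytes starting at the translation of mpaddr 0x14000,
 * bumping the file offset (arg3) by each chunk's length and waiting for
 * completion in between.  The split is needed because consecutive
 * metaphysical pages may map to discontiguous physical pages.
 */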

/* Also read in hyperprivop.S  */
int first_break = 1;

void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    if (first_break) {
        if (platform_is_hp_ski()) running_on_sim = 1;
        else running_on_sim = 0;
        first_break = 0;
    }
    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        do_ssc(vcpu_get_gr(current,36), regs);
    }
#ifdef CRASH_DEBUG
    else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    }
#endif
    else if (iim == d->arch.breakimm) {
        /* by default, do not continue */
        v->arch.hypercall_continuation = 0;

        if (ia64_hypercall(regs) &&
            !PSCBX(v, hypercall_continuation))
            vcpu_increment_iip(current);
    }
    else if (!PSCB(v,interrupt_collection_enabled)) {
        if (ia64_hyperprivop(iim,regs))
            vcpu_increment_iip(current);
    }
    else {
        if (iim == 0)
            die_if_kernel("bug check", regs, iim);
        PSCB(v,iim) = iim;
        reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
    }
}
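
/*
 * In short, break immediates are dispatched in this order: the
 * hardcoded SSC immediates 0x80001/0x80002 (HP-SKI simulator calls) go
 * to do_ssc(); d->arch.breakimm enters a hypercall, advancing iip only
 * when the hypercall finished without requesting a continuation; a
 * break taken with psr.ic off is treated as a hyperprivop; anything
 * else is reflected to the guest as a break fault with iim preserved in
 * the PSCB.
 */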

void
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;

    vector = priv_emulate(current,regs,isr);
    if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        // Note: if a path results in a vector to reflect that requires
        // iha/itir (e.g. vcpu_force_data_miss), they must be set there
        reflect_interruption(isr,regs,vector);
    }
}

/* Used in vhpt.h.  */
#define INTR_TYPE_MAX 10
UINT64 int_counts[INTR_TYPE_MAX];

void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct vcpu *v = current;
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;

    /* The following faults shouldn't be seen from Xen itself */
    if (!(psr & IA64_PSR_CPL)) BUG();

    switch(vector) {
    case 8:
        vector = IA64_DIRTY_BIT_VECTOR; break;
    case 9:
        vector = IA64_INST_ACCESS_BIT_VECTOR; break;
    case 10:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
    case 20:
        check_lazy_cover = 1;
        vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
    case 22:
        vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
    case 23:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
    case 25:
        vector = IA64_DISABLED_FPREG_VECTOR;
        break;
    case 26:
        if (((isr >> 4L) & 0xfL) == 1) {
            //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
            printf("ia64_handle_reflection: handling regNaT fault\n");
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        }
#if 1
        // pass null pointer dereferences through with no error
        // but retain debug output for non-zero ifa
        if (!ifa) {
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        }
#endif
        printf("*** NaT fault... attempting to handle as privop\n");
        printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
               isr, ifa, regs->cr_iip, psr);
        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
        // certain NaT faults are higher priority than privop faults
        vector = priv_emulate(v,regs,isr);
        if (vector == IA64_NO_FAULT) {
            printf("*** Handled privop masquerading as NaT fault\n");
            return;
        }
        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
    case 27:
        //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
        PSCB(current,iim) = iim;
        vector = IA64_SPECULATION_VECTOR; break;
    case 30:
        // FIXME: Should we handle unaligned refs in Xen??
        vector = IA64_UNALIGNED_REF_VECTOR; break;
    case 32:
        printf("ia64_handle_reflection: handling FP fault\n");
        vector = IA64_FP_FAULT_VECTOR; break;
    case 33:
        printf("ia64_handle_reflection: handling FP trap\n");
        vector = IA64_FP_TRAP_VECTOR; break;
    case 34:
        printf("ia64_handle_reflection: handling lowerpriv trap\n");
        vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
    case 35:
        printf("ia64_handle_reflection: handling taken branch trap\n");
        vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
    case 36:
        printf("ia64_handle_reflection: handling single step trap\n");
        vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;

    default:
        printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
        while(vector);
        return;
    }
    if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
    PSCB(current,ifa) = ifa;
    PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
    reflect_interruption(isr,regs,vector);
}

unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &mc_state[smp_processor_id()];
    struct vcpu *v = current;
    const char *p = format;
    unsigned long arg;
    unsigned int i;
    va_list args;

    va_start(args, format);
    if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
        panic("PREEMPT happened in multicall\n");  // Not supported yet
    } else {
        vcpu_set_gr(v, 2, op, 0);
        for ( i = 0; *p != '\0'; i++) {
            switch ( *p++ )
            {
            case 'i':
                arg = (unsigned long)va_arg(args, unsigned int);
                break;
            case 'l':
                arg = (unsigned long)va_arg(args, unsigned long);
                break;
            case 'h':
                arg = (unsigned long)va_arg(args, void *);
                break;
            default:
                arg = 0;
                BUG();
            }
            switch (i) {
            case 0: vcpu_set_gr(v, 14, arg, 0);
                break;
            case 1: vcpu_set_gr(v, 15, arg, 0);
                break;
            case 2: vcpu_set_gr(v, 16, arg, 0);
                break;
            case 3: vcpu_set_gr(v, 17, arg, 0);
                break;
            case 4: vcpu_set_gr(v, 18, arg, 0);
                break;
            default: panic("Too many args for hypercall continuation\n");
                break;
            }
        }
    }
    v->arch.hypercall_continuation = 1;
    va_end(args);
    return op;
}
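
/*
 * Hypothetical usage sketch (the hypercall and argument names are
 * examples, not taken from this file): a preempted handler re-marshals
 * its own arguments so the hypercall restarts when the guest
 * re-executes the break instruction:
 *
 *   return hypercall_create_continuation(__HYPERVISOR_memory_op,
 *                                        "ih", cmd, arg);
 *
 * One format letter per argument: 'i' = unsigned int, 'l' = unsigned
 * long, 'h' = handle/pointer.  r2 receives the hypercall number and
 * r14-r18 the first five arguments; ia64_handle_break() above then
 * skips vcpu_increment_iip() because hypercall_continuation is set, so
 * the same break re-enters the same hypercall.
 */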